// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = true;
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
	struct list_head entry;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]		= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]	= "direct-relaxable",
	[IOMMU_RESV_RESERVED]		= "reserved",
	[IOMMU_RESV_MSI]		= "msi",
	[IOMMU_RESV_SW_MSI]		= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)

static void iommu_set_cmd_line_dma_api(void)
{
	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
}

static bool iommu_cmd_line_dma_api(void)
{
	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	bool cmd_line = iommu_cmd_line_dma_api();

	if (!cmd_line) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && mem_encrypt_active()) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		cmd_line ? "(set via kernel command line)" : "");

	return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);
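
/*
 * Drivers typically call iommu_device_register() once per IOMMU
 * instance during probe (commonly after iommu_device_sysfs_add()) and
 * iommu_device_unregister() on the remove path.
 */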

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!ops)
		return -ENODEV;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}
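	/*
	 * iommu_group_get_for_dev() returned the group with a reference
	 * held; it can be dropped right away because adding the device
	 * to the group took a reference of its own, which keeps the
	 * group (and the group->entry use below) valid.
	 */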
	iommu_group_put(group);

	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);

	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver. There are still some drivers which don't
	 * support default domains, so the return value is not yet
	 * checked.
	 */
	mutex_lock(&group->mutex);
	iommu_alloc_default_domain(group, dev);

	if (group->default_domain) {
		ret = __iommu_attach_device(group->default_domain, dev);
		if (ret) {
			mutex_unlock(&group->mutex);
			iommu_group_put(group);
			goto err_release;
		}
	}

	iommu_create_device_direct_mappings(group, dev);

	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_release:
	iommu_release_device(dev);

err_out:
	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!dev->iommu)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	ops->release_device(dev);

	iommu_group_remove_device(dev);
	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);
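
/*
 * Boot-time examples: "iommu.passthrough=1" selects an identity default
 * domain for devices, while "iommu.strict=0" relaxes DMA domains to
 * lazy (batched) TLB invalidation. Both correspond to the early_params
 * registered above.
 */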

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}
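
/*
 * Worked example for the merge above: inserting a region spanning
 * [0x0000, 0x0fff] and then one spanning [0x0800, 0x1fff] of the same
 * type leaves a single merged entry covering [0x0000, 0x1fff], since
 * max(0x0fff, 0x1fff) - 0x0000 + 1 = 0x2000 bytes.
 */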

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}
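
/*
 * Each line of the reserved_regions file is "<start> <end> <type>",
 * e.g. (values are platform-dependent, shown for illustration only):
 *
 *	0x00000000fee00000 0x00000000feefffff msi
 */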

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	strcpy(buf, type);

	return strlen(type);
}
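
/*
 * The attribute above is exposed as /sys/kernel/iommu_groups/<id>/type;
 * a group whose default domain performs translation reads back as "DMA".
 */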

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
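
/*
 * Illustrative sketch (not a verbatim driver excerpt): an IOMMU driver
 * that maps each device to its own group would pair the calls roughly
 * as follows:
 *
 *	group = iommu_group_alloc();
 *	if (IS_ERR(group))
 *		return group;
 *	iommu_group_set_name(group, "my-group");  (optional)
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);  (membership now keeps the group alive)
 */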

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
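
/*
 * Illustrative sketch: a driver hanging private state off the group,
 * freed automatically when the group is released (my_data_free() is a
 * hypothetical driver callback matching the release signature):
 *
 *	iommu_group_set_iommudata(group, my_data, my_data_free);
 */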
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * iommu_group_set_name - set name for a group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * @group: the group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * @name: name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * Allow iommu driver to set a name for a group. When set it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) * appear in a name attribute file under the group in sysfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) int iommu_group_set_name(struct iommu_group *group, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (group->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) iommu_group_remove_file(group, &iommu_group_attr_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) kfree(group->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) group->name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) group->name = kstrdup(name, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (!group->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) ret = iommu_group_create_file(group, &iommu_group_attr_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) kfree(group->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) group->name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) EXPORT_SYMBOL_GPL(iommu_group_set_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static int iommu_create_device_direct_mappings(struct iommu_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) struct iommu_domain *domain = group->default_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct iommu_resv_region *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) struct list_head mappings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) unsigned long pg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (!domain || domain->type != IOMMU_DOMAIN_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) BUG_ON(!domain->pgsize_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) pg_size = 1UL << __ffs(domain->pgsize_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) INIT_LIST_HEAD(&mappings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) iommu_get_resv_regions(dev, &mappings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) /* We need to consider overlapping regions for different devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) list_for_each_entry(entry, &mappings, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) dma_addr_t start, end, addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (domain->ops->apply_resv_region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) domain->ops->apply_resv_region(dev, domain, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) start = ALIGN(entry->start, pg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) end = ALIGN(entry->start + entry->length, pg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (entry->type != IOMMU_RESV_DIRECT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) for (addr = start; addr < end; addr += pg_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) phys_addr_t phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) phys_addr = iommu_iova_to_phys(domain, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (phys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) iommu_flush_iotlb_all(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) iommu_put_resv_regions(dev, &mappings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static bool iommu_is_attach_deferred(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (domain->ops->is_attach_deferred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return domain->ops->is_attach_deferred(domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * iommu_group_add_device - add a device to an iommu group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * @group: the group into which to add the device (reference should be held)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * @dev: the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * This function is called by an iommu driver to add a device into a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * group. Adding a device increments the group reference count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) int iommu_group_add_device(struct iommu_group *group, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) int ret, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct group_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) device = kzalloc(sizeof(*device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) device->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) goto err_free_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) rename:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (!device->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) goto err_remove_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ret = sysfs_create_link_nowarn(group->devices_kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) &dev->kobj, device->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (ret == -EEXIST && i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * Account for the slim chance of collision
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * and append an instance to the name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) kfree(device->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) device->name = kasprintf(GFP_KERNEL, "%s.%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) kobject_name(&dev->kobj), i++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto rename;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) goto err_free_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) kobject_get(group->devices_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) dev->iommu_group = group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) list_add_tail(&device->list, &group->devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) ret = __iommu_attach_device(group->domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) goto err_put_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* Notify any listeners about change to group. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) blocking_notifier_call_chain(&group->notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) trace_add_device_to_group(group->id, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) dev_info(dev, "Adding to iommu group %d\n", group->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) err_put_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) list_del(&device->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) dev->iommu_group = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) kobject_put(group->devices_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) sysfs_remove_link(group->devices_kobj, device->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) err_free_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) kfree(device->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) err_remove_link:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) sysfs_remove_link(&dev->kobj, "iommu_group");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) err_free_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) kfree(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) EXPORT_SYMBOL_GPL(iommu_group_add_device);
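
/*
 * Illustrative usage sketch (not part of this file): a caller that manages
 * its own groups can pair iommu_group_alloc() with the function above and
 * then drop its local reference, since iommu_group_add_device() takes one
 * of its own. The surrounding probe context is hypothetical:
 *
 *	group = iommu_group_alloc();
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);
 *	if (ret)
 *		return ret;
 */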
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * iommu_group_remove_device - remove a device from its current group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * @dev: device to be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * This function is called by an iommu driver to remove the device from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * its current group. This decrements the iommu group reference count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) void iommu_group_remove_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct iommu_group *group = dev->iommu_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct group_device *tmp_device, *device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) dev_info(dev, "Removing from iommu group %d\n", group->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* Pre-notify listeners that a device is being removed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) blocking_notifier_call_chain(&group->notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) list_for_each_entry(tmp_device, &group->devices, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (tmp_device->dev == dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) device = tmp_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) list_del(&device->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) sysfs_remove_link(group->devices_kobj, device->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) sysfs_remove_link(&dev->kobj, "iommu_group");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) trace_remove_device_from_group(group->id, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) kfree(device->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) kfree(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) dev->iommu_group = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) kobject_put(group->devices_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) EXPORT_SYMBOL_GPL(iommu_group_remove_device);
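
/*
 * Illustrative sketch (not part of this file): the matching release path
 * needs no group pointer, since the group is found via dev->iommu_group
 * and the reference taken by iommu_group_add_device() is dropped here:
 *
 *	iommu_group_remove_device(dev);
 */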
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) static int iommu_group_device_count(struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct group_device *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) list_for_each_entry(entry, &group->devices, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ret++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * iommu_group_for_each_dev - iterate over each device in the group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * @group: the group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * @data: caller opaque data to be passed to callback function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * @fn: caller supplied callback function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * This function is called by group users to iterate over group devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * Callers should hold a reference count to the group during callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * The group->mutex is held across callbacks, which will block calls to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * iommu_group_add/remove_device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int (*fn)(struct device *, void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct group_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) list_for_each_entry(device, &group->devices, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) ret = fn(device->dev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) int iommu_group_for_each_dev(struct iommu_group *group, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) int (*fn)(struct device *, void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ret = __iommu_group_for_each_dev(group, data, fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
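
/*
 * Illustrative usage sketch (not part of this file): counting group members
 * with the iterator above. Because group->mutex is held across callbacks,
 * the callback must not call back into APIs that take it. Names are
 * hypothetical:
 *
 *	static int my_count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// a non-zero return stops the walk
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, my_count_dev);
 */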
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * iommu_group_get - Return the group for a device and increment reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * @dev: get the group that this device belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * This function is called by iommu drivers and users to get the group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * for the specified device. If found, the group is returned and the group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * reference is incremented, else NULL is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct iommu_group *iommu_group_get(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct iommu_group *group = dev->iommu_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) kobject_get(group->devices_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) EXPORT_SYMBOL_GPL(iommu_group_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * iommu_group_ref_get - Increment reference on a group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * @group: the group to use, must not be NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * This function is called by iommu drivers to take additional references on an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * existing group. Returns the given group for convenience.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) kobject_get(group->devices_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) EXPORT_SYMBOL_GPL(iommu_group_ref_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * iommu_group_put - Decrement group reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * @group: the group to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * This function is called by iommu drivers and users to release the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * iommu group. Once the reference count is zero, the group is released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) void iommu_group_put(struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) kobject_put(group->devices_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) EXPORT_SYMBOL_GPL(iommu_group_put);
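
/*
 * Illustrative sketch (not part of this file): the canonical get/put pattern
 * for temporary access to a device's group:
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (group) {
 *		pr_info("device is in group %d\n", iommu_group_id(group));
 *		iommu_group_put(group);
 *	}
 */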
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * iommu_group_register_notifier - Register a notifier for group changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * @group: the group to watch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * @nb: notifier block to signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * This function allows iommu group users to track changes in a group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * See include/linux/iommu.h for actions sent via this notifier. Caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * should hold a reference to the group throughout notifier registration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) int iommu_group_register_notifier(struct iommu_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return blocking_notifier_chain_register(&group->notifier, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * iommu_group_unregister_notifier - Unregister a notifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * @group: the group to watch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * @nb: notifier block to signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * Unregister a previously registered group notifier block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int iommu_group_unregister_notifier(struct iommu_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return blocking_notifier_chain_unregister(&group->notifier, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
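
/*
 * Illustrative sketch (not part of this file): a consumer watching a group
 * for membership changes. The notifier block and its handler are
 * hypothetical:
 *
 *	static int my_group_notifier(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "added to watched group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notifier,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 */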
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * iommu_register_device_fault_handler() - Register a device fault handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * @dev: the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * @handler: the fault handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * @data: private data passed as argument to the handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * When an IOMMU fault event is received, this handler gets called with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * fault event and data as arguments. The handler should return 0 on success. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * complete the fault by calling iommu_page_response() with one of the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * response codes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * - IOMMU_PAGE_RESP_INVALID: terminate the fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * page faults if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * Return 0 if the fault handler was installed successfully, or an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) int iommu_register_device_fault_handler(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) iommu_dev_fault_handler_t handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct dev_iommu *param = dev->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (!param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) mutex_lock(&param->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* Only allow one fault handler registered for each device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (param->fault_param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) goto done_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (!param->fault_param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) goto done_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) param->fault_param->handler = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) param->fault_param->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) mutex_init(&param->fault_param->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) INIT_LIST_HEAD(&param->fault_param->faults);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) done_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) mutex_unlock(&param->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
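
/*
 * Illustrative sketch (not part of this file): registering a consumer for
 * recoverable faults. my_handle_fault(), my_prq_work and my_data are
 * hypothetical; a real handler for IOMMU_FAULT_PAGE_REQ would later
 * complete the fault through iommu_page_response():
 *
 *	static int my_handle_fault(struct iommu_fault *fault, void *data)
 *	{
 *		if (fault->type == IOMMU_FAULT_PAGE_REQ)
 *			schedule_work(&my_prq_work);	// respond from process context
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_handle_fault, my_data);
 */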
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * iommu_unregister_device_fault_handler() - Unregister the device fault handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * @dev: the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * Remove the device fault handler installed with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * iommu_register_device_fault_handler().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * Return 0 on success, or an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) int iommu_unregister_device_fault_handler(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct dev_iommu *param = dev->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (!param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) mutex_lock(&param->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (!param->fault_param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* we cannot unregister handler if there are pending faults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!list_empty(&param->fault_param->faults)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) kfree(param->fault_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) param->fault_param = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) mutex_unlock(&param->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * iommu_report_device_fault() - Report fault event to device driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * @dev: the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * @evt: fault event data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * handler. When this function fails and the fault is recoverable, it is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * caller's responsibility to complete the fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * Return 0 on success, or an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) struct dev_iommu *param = dev->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct iommu_fault_event *evt_pending = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct iommu_fault_param *fparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (!param || !evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* we only report device fault if there is a handler registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) mutex_lock(&param->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) fparam = param->fault_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (!fparam || !fparam->handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) goto done_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (!evt_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) goto done_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) mutex_lock(&fparam->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) list_add_tail(&evt_pending->list, &fparam->faults);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) mutex_unlock(&fparam->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) ret = fparam->handler(&evt->fault, fparam->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (ret && evt_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) mutex_lock(&fparam->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) list_del(&evt_pending->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) mutex_unlock(&fparam->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) kfree(evt_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) done_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) mutex_unlock(&param->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) EXPORT_SYMBOL_GPL(iommu_report_device_fault);
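
/*
 * Illustrative sketch (not part of this file): an IOMMU driver's event
 * thread reporting a last-page page request; filling of the remaining prm
 * fields from hardware, and the fallback helper, are hypothetical:
 *
 *	struct iommu_fault_event evt = {
 *		.fault.type	 = IOMMU_FAULT_PAGE_REQ,
 *		.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
 *	};
 *
 *	if (iommu_report_device_fault(dev, &evt))
 *		my_complete_fault(dev, &evt);	// caller must complete the fault
 */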
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) int iommu_page_response(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct iommu_page_response *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) bool needs_pasid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct iommu_fault_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct iommu_fault_page_request *prm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct dev_iommu *param = dev->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (!domain || !domain->ops->page_response)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (!param || !param->fault_param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /* Only send response if there is a fault report pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) mutex_lock(&param->fault_param->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (list_empty(&param->fault_param->faults)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) goto done_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * Check if we have a matching page request pending to respond,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * otherwise return -EINVAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) list_for_each_entry(evt, &param->fault_param->faults, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) prm = &evt->fault.prm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (prm->grpid != msg->grpid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * If the PASID is required, the corresponding request is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * matched using the group ID, the PASID valid bit and the PASID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * value. Otherwise, the request and response are matched using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * group ID alone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (!needs_pasid && has_pasid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* No big deal, just clear it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) msg->pasid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) ret = domain->ops->page_response(dev, evt, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) list_del(&evt->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) kfree(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) done_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) mutex_unlock(&param->fault_param->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) EXPORT_SYMBOL_GPL(iommu_page_response);
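
/*
 * Illustrative sketch (not part of this file): completing a page request
 * previously queued for the fault handler. The grpid/pasid values must
 * match the pending fault as described above:
 *
 *	struct iommu_page_response resp = {
 *		.version = IOMMU_PAGE_RESP_VERSION_1,
 *		.flags	 = IOMMU_PAGE_RESP_PASID_VALID,
 *		.grpid	 = grpid,
 *		.pasid	 = pasid,
 *		.code	 = IOMMU_PAGE_RESP_SUCCESS,
 *	};
 *
 *	iommu_page_response(dev, &resp);
 */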
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * iommu_group_id - Return ID for a group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * @group: the group to ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * Return the unique ID for the group matching the sysfs group number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) int iommu_group_id(struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return group->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) EXPORT_SYMBOL_GPL(iommu_group_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) unsigned long *devfns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * To consider a PCI device isolated, we require ACS to support Source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * Validation, Request Redirection, Completer Redirection, and Upstream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * Forwarding. This effectively means that devices cannot spoof their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * requester ID, requests and completions cannot be redirected, and all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * transactions are forwarded upstream, even as they pass through a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * bridge where the target device is downstream.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * For multifunction devices which are not isolated from each other, find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * all the other non-isolated functions and look for existing groups. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * each function, we also need to look for aliases to or from other devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * that may already have a group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) unsigned long *devfns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct pci_dev *tmp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) for_each_pci_dev(tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (tmp == pdev || tmp->bus != pdev->bus ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) pci_acs_enabled(tmp, REQ_ACS_FLAGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) group = get_pci_alias_group(tmp, devfns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) pci_dev_put(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * Look for aliases to or from the given device for existing groups. DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * aliases are only supported on the same bus, therefore the search
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * space is quite small (especially since we're really only looking at PCIe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * devices, and therefore only expect multiple slots on the root complex or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * downstream switch ports). It's conceivable though that a pair of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * multifunction devices could have aliases between them that would cause a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * loop. To prevent this, we use a bitmap to track where we've been.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) unsigned long *devfns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) struct pci_dev *tmp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (test_and_set_bit(pdev->devfn & 0xff, devfns))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) group = iommu_group_get(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) for_each_pci_dev(tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (tmp == pdev || tmp->bus != pdev->bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /* We alias them or they alias us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (pci_devs_are_dma_aliases(pdev, tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) group = get_pci_alias_group(tmp, devfns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) pci_dev_put(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) group = get_pci_function_alias_group(tmp, devfns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) pci_dev_put(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) struct group_for_pci_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * DMA alias iterator callback, return the last seen device. Stop and return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * the IOMMU group if we find one along the way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct group_for_pci_data *data = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) data->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) data->group = iommu_group_get(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return data->group != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * Generic device_group call-back function. It just allocates one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * iommu-group per device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct iommu_group *generic_device_group(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return iommu_group_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) EXPORT_SYMBOL_GPL(generic_device_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * Use standard PCI bus topology, isolation features, and DMA alias quirks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * to find or create an IOMMU group for a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct iommu_group *pci_device_group(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct group_for_pci_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct pci_bus *bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct iommu_group *group = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) u64 devfns[4] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (WARN_ON(!dev_is_pci(dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * Find the upstream DMA alias for the device. A device must not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * be aliased due to topology in order to have its own IOMMU group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * If we find an alias along the way that already belongs to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * group, use it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return data.group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) pdev = data.pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * Continue upstream from the point of minimum IOMMU granularity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * due to aliases to the point where devices are protected from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * peer-to-peer DMA by PCI ACS. Again, if we find an existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * group, use it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (!bus->self)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) pdev = bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) group = iommu_group_get(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * Look for existing groups on device aliases. If we alias another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * device or another device aliases us, use the same group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) group = get_pci_alias_group(pdev, (unsigned long *)devfns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * Look for existing groups on non-isolated functions on the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * slot and aliases of those functions, if any. No need to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * the search bitmap, the tested devfns are still valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /* No shared group found, allocate new */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) return iommu_group_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) EXPORT_SYMBOL_GPL(pci_device_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) /* Get the IOMMU group for device on fsl-mc bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct iommu_group *fsl_mc_device_group(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct device *cont_dev = fsl_mc_cont_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) group = iommu_group_get(cont_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) group = iommu_group_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) EXPORT_SYMBOL_GPL(fsl_mc_device_group);
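
/*
 * Illustrative sketch (not part of this file): drivers serving several bus
 * types commonly dispatch their .device_group op between the helpers in
 * this file:
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		if (dev_is_pci(dev))
 *			return pci_device_group(dev);
 *		if (dev_is_fsl_mc(dev))
 *			return fsl_mc_device_group(dev);
 *		return generic_device_group(dev);
 *	}
 */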
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static int iommu_get_def_domain_type(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) const struct iommu_ops *ops = dev->bus->iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) unsigned int type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (ops->def_domain_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) type = ops->def_domain_type(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return (type == 0) ? iommu_def_domain_type : type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
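
/*
 * Illustrative sketch (not part of this file): a driver forces a domain type
 * for quirky devices through the op consulted above; returning 0 defers to
 * the global default. The quirk test is hypothetical:
 *
 *	static int my_def_domain_type(struct device *dev)
 *	{
 *		if (my_device_needs_passthrough(dev))
 *			return IOMMU_DOMAIN_IDENTITY;
 *		return 0;
 *	}
 */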
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) static int iommu_group_alloc_default_domain(struct bus_type *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) struct iommu_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct iommu_domain *dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) dom = __iommu_domain_alloc(bus, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (!dom && type != IOMMU_DOMAIN_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (dom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) type, group->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (!dom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) group->default_domain = dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (!group->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) group->domain = dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (!iommu_dma_strict) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) int attr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) iommu_domain_set_attr(dom, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) &attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) static int iommu_alloc_default_domain(struct iommu_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) unsigned int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (group->default_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) type = iommu_get_def_domain_type(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) return iommu_group_alloc_default_domain(dev->bus, group, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * iommu_group_get_for_dev - Find or create the IOMMU group for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * @dev: target device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * This function is intended to be called by IOMMU drivers and extended to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * support common, bus-defined algorithms when determining or creating the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * IOMMU group for a device. On success, the caller will hold a reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * to the returned IOMMU group, which will already include the provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * device. The reference should be released with iommu_group_put().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) const struct iommu_ops *ops = dev->bus->iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) group = iommu_group_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) group = ops->device_group(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (WARN_ON_ONCE(group == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (IS_ERR(group))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ret = iommu_group_add_device(group, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) goto out_put_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) out_put_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) iommu_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return group->default_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static int probe_iommu_group(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) struct list_head *group_list = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) /* Device is probed already if in a group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) group = iommu_group_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) iommu_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ret = __iommu_probe_device(dev, group_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (ret == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static int remove_iommu_group(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) iommu_release_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) static int iommu_bus_notifier(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) unsigned long action, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) unsigned long group_action = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct device *dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * ADD/DEL call into iommu driver ops if provided, which may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) * result in ADD/DEL notifiers to group->notifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (action == BUS_NOTIFY_ADD_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) ret = iommu_probe_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) return (ret) ? NOTIFY_DONE : NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) iommu_release_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) * Remaining BUS_NOTIFYs get filtered and republished to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * group, if anyone is listening
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) group = iommu_group_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) switch (action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) case BUS_NOTIFY_BIND_DRIVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) case BUS_NOTIFY_BOUND_DRIVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) case BUS_NOTIFY_UNBIND_DRIVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) case BUS_NOTIFY_UNBOUND_DRIVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (group_action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) blocking_notifier_call_chain(&group->notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) group_action, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) iommu_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
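
/*
 * Illustrative sketch of a listener for the events republished above.
 * Names prefixed with "my_" are hypothetical; the registration helper
 * is the iommu_group_register_notifier() provided elsewhere in this
 * file:
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_BOUND_DRIVER)
 *			dev_info(dev, "driver bound within the group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notify,
 *	};
 *
 *	err = iommu_group_register_notifier(group, &my_nb);
 */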
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) struct __group_domain_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) unsigned int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static int probe_get_default_domain_type(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) const struct iommu_ops *ops = dev->bus->iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) struct __group_domain_type *gtype = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) unsigned int type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (ops->def_domain_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) type = ops->def_domain_type(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (gtype->type && gtype->type != type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) iommu_domain_type_str(type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) dev_name(gtype->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) iommu_domain_type_str(gtype->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) gtype->type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (!gtype->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) gtype->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) gtype->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) static void probe_alloc_default_domain(struct bus_type *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) struct __group_domain_type gtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) memset(&gtype, 0, sizeof(gtype));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /* Ask for default domain requirements of all devices in the group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) __iommu_group_for_each_dev(group, &gtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) probe_get_default_domain_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (!gtype.type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) gtype.type = iommu_def_domain_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) iommu_group_alloc_default_domain(bus, group, gtype.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
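
/*
 * A minimal sketch of the driver hook consumed above, assuming a
 * hypothetical driver where my_dev_needs_passthrough() is made up for
 * illustration:
 *
 *	static int my_def_domain_type(struct device *dev)
 *	{
 *		if (my_dev_needs_passthrough(dev))
 *			return IOMMU_DOMAIN_IDENTITY;
 *		return 0;	(no preference, use the global default)
 *	}
 *
 * A zero return lets probe_get_default_domain_type() fall through to
 * iommu_def_domain_type; conflicting non-zero answers within one group
 * are warned about and resolved to the default, as seen above.
 */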
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static int iommu_group_do_dma_attach(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct iommu_domain *domain = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (!iommu_is_attach_deferred(domain, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) ret = __iommu_attach_device(domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) static int __iommu_group_dma_attach(struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) return __iommu_group_for_each_dev(group, group->default_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) iommu_group_do_dma_attach);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) static int iommu_group_do_probe_finalize(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) struct iommu_domain *domain = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (domain->ops->probe_finalize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) domain->ops->probe_finalize(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) static void __iommu_group_dma_finalize(struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) __iommu_group_for_each_dev(group, group->default_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) iommu_group_do_probe_finalize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) static int iommu_do_create_direct_mappings(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) struct iommu_group *group = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) iommu_create_device_direct_mappings(group, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static int iommu_group_create_direct_mappings(struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return __iommu_group_for_each_dev(group, group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) iommu_do_create_direct_mappings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) int bus_iommu_probe(struct bus_type *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) struct iommu_group *group, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) LIST_HEAD(group_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * This code-path does not allocate the default domain when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * creating the iommu group, so do it after the groups are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) * created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) list_for_each_entry_safe(group, next, &group_list, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /* Remove item from the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) list_del_init(&group->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /* Try to allocate default domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) probe_alloc_default_domain(bus, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (!group->default_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) iommu_group_create_direct_mappings(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) ret = __iommu_group_dma_attach(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) __iommu_group_dma_finalize(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct notifier_block *nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (!nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) nb->notifier_call = iommu_bus_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) err = bus_register_notifier(bus, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) err = bus_iommu_probe(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) /* Clean up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) bus_unregister_notifier(bus, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) kfree(nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * bus_set_iommu - set iommu-callbacks for the bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * @bus: the bus type the callbacks are registered for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * @ops: the callbacks provided by the iommu-driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * This function is called by an iommu driver to set the iommu methods
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * used for a particular bus. Drivers for devices on that bus can use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) * the iommu-api after these ops are registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * This special function is needed because IOMMUs are usually devices on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) * the bus itself, so the iommu drivers are not initialized when the bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * is set up. With this function the iommu-driver can set the iommu-ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) * afterwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (ops == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) bus->iommu_ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (bus->iommu_ops != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) bus->iommu_ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /* Do IOMMU specific setup for this bus-type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) err = iommu_bus_init(bus, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) bus->iommu_ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) EXPORT_SYMBOL_GPL(bus_set_iommu);
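
/*
 * Typical registration from an IOMMU driver's init path, sketched with a
 * hypothetical and heavily abbreviated ops structure:
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.domain_alloc	= my_domain_alloc,
 *		.domain_free	= my_domain_free,
 *		.attach_dev	= my_attach_dev,
 *		...
 *	};
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		...
 *		return bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 *	}
 *
 * Once this returns 0, iommu_present(&platform_bus_type) is true and the
 * bus's existing devices have been probed via bus_iommu_probe().
 */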
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) bool iommu_present(struct bus_type *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) return bus->iommu_ops != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) EXPORT_SYMBOL_GPL(iommu_present);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (!bus->iommu_ops || !bus->iommu_ops->capable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) return bus->iommu_ops->capable(cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) EXPORT_SYMBOL_GPL(iommu_capable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) * iommu_set_fault_handler() - set a fault handler for an iommu domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * @domain: iommu domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * @handler: fault handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * @token: user data, will be passed back to the fault handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * This function should be used by IOMMU users which want to be notified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * whenever an IOMMU fault happens.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) * The fault handler itself should return 0 on success, and an appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) void iommu_set_fault_handler(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) iommu_fault_handler_t handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) void *token)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) BUG_ON(!domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) domain->handler = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) domain->handler_token = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
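
/*
 * Hedged sketch of a consumer. The handler signature matches
 * iommu_fault_handler_t; the "my_" names and the token are hypothetical:
 *
 *	static int my_fault(struct iommu_domain *domain, struct device *dev,
 *			    unsigned long iova, int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at iova %#lx, flags %#x\n",
 *			iova, flags);
 *		return -ENOSYS;	(not handled, fall back to default report)
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault, my_token);
 */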
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) unsigned type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct iommu_domain *domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (bus == NULL || bus->iommu_ops == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) domain = bus->iommu_ops->domain_alloc(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) if (!domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) domain->ops = bus->iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) domain->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) /* Assume all sizes by default; the driver may override this later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) return domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) EXPORT_SYMBOL_GPL(iommu_domain_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) void iommu_domain_free(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) domain->ops->domain_free(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) EXPORT_SYMBOL_GPL(iommu_domain_free);
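
/*
 * The unmanaged-domain lifecycle these two helpers bracket, as a hedged
 * sketch with error handling elided ("dev" stands for a device that
 * already sits in an iommu group):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(dev->bus);
 *
 *	if (!dom)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(dom, dev);
 *	... use iommu_map()/iommu_unmap() on dom ...
 *	iommu_detach_device(dom, dev);
 *	iommu_domain_free(dom);
 */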
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) static int __iommu_attach_device(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (unlikely(domain->ops->attach_dev == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) ret = domain->ops->attach_dev(domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) trace_attach_device_to_domain(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) group = iommu_group_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * Lock the group to make sure the device-count doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) * change while we are attaching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) /* Don't break the attach if the IOMMU is shared by more than one master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (iommu_group_device_count(group) < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) ret = __iommu_attach_group(domain, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) iommu_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) EXPORT_SYMBOL_GPL(iommu_attach_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * Check flags and other user provided data for valid combinations. We also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * make sure no reserved fields or unused flags are set. This is to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * not breaking userspace in the future when these fields or flags are used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (info->cache & ~mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (info->granularity >= IOMMU_INV_GRANU_NR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) switch (info->granularity) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) case IOMMU_INV_GRANU_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) mask = IOMMU_INV_ADDR_FLAGS_PASID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) IOMMU_INV_ADDR_FLAGS_ARCHID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) IOMMU_INV_ADDR_FLAGS_LEAF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (info->granu.addr_info.flags & ~mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) case IOMMU_INV_GRANU_PASID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) mask = IOMMU_INV_PASID_FLAGS_PASID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) IOMMU_INV_PASID_FLAGS_ARCHID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (info->granu.pasid_info.flags & ~mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) case IOMMU_INV_GRANU_DOMAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) /* Check reserved padding fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) for (i = 0; i < sizeof(info->padding); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (info->padding[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) void __user *uinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) struct iommu_cache_invalidate_info inv_info = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) u32 minsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (unlikely(!domain->ops->cache_invalidate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * No new fields can be added before the variable sized union; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * minimum size is the offset to the union.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) minsz = offsetof(struct iommu_cache_invalidate_info, granu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) /* Copy minsz from user to get flags and argsz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (copy_from_user(&inv_info, uinfo, minsz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) /* Fields before the variable size union are mandatory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (inv_info.argsz < minsz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /* PASID and address granu require additional info beyond minsz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * The user might be using a newer UAPI header which has a larger data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) * size; we support the existing flags within the current size. Copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * the remaining user data _after_ minsz, but no more than the size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * the current kernel supports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) /* Now the argsz is validated, check the content */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) ret = iommu_check_cache_invl_data(&inv_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) return domain->ops->cache_invalidate(domain, dev, &inv_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
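
/*
 * What the argsz handshake above expects from userspace, sketched for a
 * PASID-granularity request (the field values are illustrative only):
 *
 *	struct iommu_cache_invalidate_info info = {
 *		.argsz		= sizeof(info),
 *		.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
 *		.cache		= IOMMU_CACHE_INV_TYPE_PASID,
 *		.granularity	= IOMMU_INV_GRANU_PASID,
 *		.granu.pasid_info.flags	= IOMMU_INV_PASID_FLAGS_PASID,
 *		.granu.pasid_info.pasid	= pasid,
 *	};
 *
 * A binary built against an older, smaller struct keeps working because
 * only min(argsz, sizeof(inv_info)) - minsz bytes are copied beyond the
 * mandatory header.
 */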
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) if (data->version != IOMMU_GPASID_BIND_VERSION_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /* Check the range of supported formats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) if (data->format >= IOMMU_PASID_FORMAT_LAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) /* Check all flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) mask = IOMMU_SVA_GPASID_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) if (data->flags & ~mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* Check reserved padding fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) for (i = 0; i < sizeof(data->padding); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (data->padding[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) static int iommu_sva_prepare_bind_data(void __user *udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) struct iommu_gpasid_bind_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) u32 minsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) * No new fields can be added before the variable sized union; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) * minimum size is the offset to the union.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) /* Copy minsz from user to get flags and argsz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (copy_from_user(data, udata, minsz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) /* Fields before the variable size union are mandatory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (data->argsz < minsz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * The user might be using a newer UAPI header; let the IOMMU vendor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * driver decide what size it needs. Since the guest PASID bind data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * can be vendor specific, a larger argsz could be the result of an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) * extension for one vendor without affecting another vendor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) * Copy the remaining user data _after_ minsz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (copy_from_user((void *)data + minsz, udata + minsz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) min_t(u32, data->argsz, sizeof(*data)) - minsz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) return iommu_check_bind_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) void __user *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) struct iommu_gpasid_bind_data data = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (unlikely(!domain->ops->sva_bind_gpasid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) ret = iommu_sva_prepare_bind_data(udata, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) return domain->ops->sva_bind_gpasid(domain, dev, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) ioasid_t pasid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) if (unlikely(!domain->ops->sva_unbind_gpasid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) return domain->ops->sva_unbind_gpasid(dev, pasid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) void __user *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) struct iommu_gpasid_bind_data data = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (unlikely(!domain->ops->sva_bind_gpasid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) ret = iommu_sva_prepare_bind_data(udata, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) static void __iommu_detach_device(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) if (iommu_is_attach_deferred(domain, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (unlikely(domain->ops->detach_dev == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) domain->ops->detach_dev(domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) trace_detach_device_from_domain(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) group = iommu_group_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) /* Don't break the detach if the IOMMU is shared by more than one master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (iommu_group_device_count(group) < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) __iommu_detach_group(domain, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) iommu_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) EXPORT_SYMBOL_GPL(iommu_detach_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) struct iommu_domain *domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) group = iommu_group_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) domain = group->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) iommu_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) return domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * For IOMMU_DOMAIN_DMA implementations which already provide their own
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * guarantees that the group and its default domain are valid and correct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) struct iommu_domain *iommu_get_dma_domain(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) return dev->iommu_group->default_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) * IOMMU groups are really the natural working unit of the IOMMU, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) * the IOMMU API works on domains and devices. Bridge that gap by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * iterating over the devices in a group. Ideally we'd have a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) * device which represents the requestor ID of the group, but we also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) * allow IOMMU drivers to create policy defined minimum sets, where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) * the physical hardware may be able to distinguish members, but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) * wish to group them at a higher level (ex. untrusted multi-function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) * PCI devices). Thus we attach each device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) static int iommu_group_do_attach_device(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) struct iommu_domain *domain = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return __iommu_attach_device(domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) static int __iommu_attach_group(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (group->default_domain && group->domain != group->default_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) ret = __iommu_group_for_each_dev(group, domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) iommu_group_do_attach_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) group->domain = domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) ret = __iommu_attach_group(domain, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) EXPORT_SYMBOL_GPL(iommu_attach_group);
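
/*
 * Sketch of the group-level flow described above, as VFIO-style
 * consumers drive it ("grp" stands for a reference obtained via
 * iommu_group_get()):
 *
 *	ret = iommu_attach_group(domain, grp);
 *	if (ret)
 *		goto err;	(e.g. -EBUSY if a non-default domain is live)
 *	...
 *	iommu_detach_group(domain, grp);
 */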
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) static int iommu_group_do_detach_device(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) struct iommu_domain *domain = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) __iommu_detach_device(domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) static void __iommu_detach_group(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) if (!group->default_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) __iommu_group_for_each_dev(group, domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) iommu_group_do_detach_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) group->domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (group->domain == group->default_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) /* Detach by re-attaching to the default domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) ret = __iommu_group_for_each_dev(group, group->default_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) iommu_group_do_attach_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) group->domain = group->default_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) __iommu_detach_group(domain, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) EXPORT_SYMBOL_GPL(iommu_detach_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) if (unlikely(domain->ops->iova_to_phys == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return domain->ops->iova_to_phys(domain, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) phys_addr_t paddr, size_t size, size_t *count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) unsigned int pgsize_idx, pgsize_idx_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) unsigned long pgsizes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) size_t offset, pgsize, pgsize_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) unsigned long addr_merge = paddr | iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) /* Page sizes supported by the hardware and small enough for @size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) /* Constrain the page sizes further based on the maximum alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if (likely(addr_merge))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) pgsizes &= GENMASK(__ffs(addr_merge), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) /* Make sure we have at least one suitable page size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) BUG_ON(!pgsizes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) /* Pick the biggest page size remaining */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) pgsize_idx = __fls(pgsizes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) pgsize = BIT(pgsize_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return pgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) /* Find the next biggest supported page size, if it exists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (!pgsizes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) goto out_set_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) pgsize_idx_next = __ffs(pgsizes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) pgsize_next = BIT(pgsize_idx_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) * There's no point trying a bigger page size unless the virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) * and physical addresses are similarly offset within the larger page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) if ((iova ^ paddr) & (pgsize_next - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) goto out_set_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) /* Calculate the offset to the next page size alignment boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) offset = pgsize_next - (addr_merge & (pgsize_next - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) * If size is big enough to accommodate the larger page, reduce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) * the number of smaller pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (offset + pgsize_next <= size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) size = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) out_set_count:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) *count = size >> pgsize_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) return pgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
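
/*
 * Worked example of the selection above, with illustrative numbers:
 * pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G, iova = paddr = 0x200000,
 * size = 0x500000 (5 MiB):
 *
 *	- GENMASK(__fls(size), 0) keeps 4K and 2M (1G is too big)
 *	- __ffs(addr_merge) = 21, so the 2M alignment also survives
 *	- pgsize = 2M (pgsize_idx = 21)
 *	- the next size up is 1G, but offset + 1G > size, so no trimming
 *	- *count = 0x500000 >> 21 = 2
 *
 * i.e. map two 2 MiB pages now; the caller's loop handles the remaining
 * 1 MiB on its next lap, falling back to 4K pages there.
 */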
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) phys_addr_t paddr, size_t size, int prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) gfp_t gfp, size_t *mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) const struct iommu_ops *ops = domain->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) size_t pgsize, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) iova, &paddr, pgsize, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (ops->map_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) gfp, mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) *mapped = ret ? 0 : pgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) const struct iommu_ops *ops = domain->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) unsigned long orig_iova = iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) unsigned int min_pagesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) size_t orig_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) phys_addr_t orig_paddr = paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) if (unlikely(!(ops->map || ops->map_pages) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) domain->pgsize_bitmap == 0UL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) /* find out the minimum page size supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * both the virtual address and the physical one, as well as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) * the size of the mapping, must be aligned (at least) to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * size of the smallest page supported by the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) iova, &paddr, size, min_pagesz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) while (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) size_t mapped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) &mapped);
		/*
		 * Some pages may have been mapped even if an error occurred,
		 * so account for them here so that they can be unmapped below.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) size -= mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) iova += mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) paddr += mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) /* unroll mapping in case something went wrong */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) iommu_unmap(domain, orig_iova, orig_size - size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) trace_map(orig_iova, orig_paddr, orig_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) const struct iommu_ops *ops = domain->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) if (ret == 0 && ops->iotlb_sync_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) ops->iotlb_sync_map(domain, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) int iommu_map(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) phys_addr_t paddr, size_t size, int prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) EXPORT_SYMBOL_GPL(iommu_map);
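
/*
 * Illustrative sketch of a caller (hypothetical, not part of this file):
 * map one physically contiguous buffer and tear it down again, relying
 * on __iommu_map()'s guarantee that nothing stays mapped on failure.
 */
static int __maybe_unused example_map_buffer(struct iommu_domain *domain,
					     unsigned long iova,
					     phys_addr_t paddr, size_t size)
{
	int ret;

	/* May sleep; atomic contexts must use iommu_map_atomic() below. */
	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... device performs DMA through the domain ... */

	/* iommu_unmap() returns the number of bytes actually unmapped. */
	if (iommu_unmap(domain, iova, size) != size)
		return -EIO;

	return 0;
}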
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) phys_addr_t paddr, size_t size, int prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) EXPORT_SYMBOL_GPL(iommu_map_atomic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) static size_t __iommu_unmap_pages(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) unsigned long iova, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) struct iommu_iotlb_gather *iotlb_gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) const struct iommu_ops *ops = domain->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) size_t pgsize, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) pgsize = iommu_pgsize(domain, iova, iova, size, &count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) return ops->unmap_pages ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) ops->unmap(domain, iova, pgsize, iotlb_gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) static size_t __iommu_unmap(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) unsigned long iova, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct iommu_iotlb_gather *iotlb_gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) const struct iommu_ops *ops = domain->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) size_t unmapped_page, unmapped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) unsigned long orig_iova = iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) unsigned int min_pagesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) if (unlikely(!(ops->unmap || ops->unmap_pages) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) domain->pgsize_bitmap == 0UL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) /* find out the minimum page size supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) * The virtual address, as well as the size of the mapping, must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) * aligned (at least) to the size of the smallest page supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * by the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (!IS_ALIGNED(iova | size, min_pagesz)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) iova, size, min_pagesz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) * Keep iterating until we either unmap 'size' bytes (or more)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) * or we hit an area that isn't mapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) while (unmapped < size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) unmapped_page = __iommu_unmap_pages(domain, iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) size - unmapped,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) iotlb_gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) if (!unmapped_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) iova, unmapped_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) iova += unmapped_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) unmapped += unmapped_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) trace_unmap(orig_iova, size, unmapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) return unmapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) size_t iommu_unmap(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) unsigned long iova, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) struct iommu_iotlb_gather iotlb_gather;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) size_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) iommu_iotlb_gather_init(&iotlb_gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) iommu_iotlb_sync(domain, &iotlb_gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) EXPORT_SYMBOL_GPL(iommu_unmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) size_t iommu_unmap_fast(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) unsigned long iova, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) struct iommu_iotlb_gather *iotlb_gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) return __iommu_unmap(domain, iova, size, iotlb_gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) EXPORT_SYMBOL_GPL(iommu_unmap_fast);
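
/*
 * Illustrative sketch (hypothetical caller): batch several unmaps with
 * iommu_unmap_fast() and pay for a single TLB invalidation at the end,
 * instead of the per-call sync that iommu_unmap() performs.
 */
static void __maybe_unused example_unmap_batch(struct iommu_domain *domain,
					       const unsigned long *iovas,
					       const size_t *sizes,
					       unsigned int n)
{
	struct iommu_iotlb_gather gather;
	unsigned int i;

	iommu_iotlb_gather_init(&gather);

	for (i = 0; i < n; i++)
		iommu_unmap_fast(domain, iovas[i], sizes[i], &gather);

	/* One flush covers everything accumulated in the gather. */
	iommu_iotlb_sync(domain, &gather);
}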
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) struct scatterlist *sg, unsigned int nents, int prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) const struct iommu_ops *ops = domain->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) size_t len = 0, mapped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) phys_addr_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (ops->map_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) ret = ops->map_sg(domain, iova, sg, nents, prot, gfp, &mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) if (ops->iotlb_sync_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) ops->iotlb_sync_map(domain, iova, mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) return mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) while (i <= nents) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) phys_addr_t s_phys = sg_phys(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) if (len && s_phys != start + len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) ret = __iommu_map(domain, iova + mapped, start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) len, prot, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) mapped += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) len += sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) len = sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) start = s_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) if (++i < nents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) if (ops->iotlb_sync_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) ops->iotlb_sync_map(domain, iova, mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) return mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) /* undo mappings already done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) iommu_unmap(domain, iova, mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) struct scatterlist *sg, unsigned int nents, int prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) EXPORT_SYMBOL_GPL(iommu_map_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) struct scatterlist *sg, unsigned int nents, int prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
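
/*
 * Illustrative sketch (hypothetical caller): map a scatterlist
 * contiguously into IOVA space. Note that iommu_map_sg() reports
 * failure by returning 0 mapped bytes rather than a negative errno.
 */
static int __maybe_unused example_map_sgl(struct iommu_domain *domain,
					  unsigned long iova,
					  struct scatterlist *sgl,
					  unsigned int nents)
{
	size_t mapped;

	/* Use iommu_map_sg_atomic() instead from atomic context. */
	mapped = iommu_map_sg(domain, iova, sgl, nents,
			      IOMMU_READ | IOMMU_WRITE);

	return mapped ? 0 : -ENOMEM;
}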
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) phys_addr_t paddr, u64 size, int prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (unlikely(domain->ops->domain_window_enable == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) if (unlikely(domain->ops->domain_window_disable == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) return domain->ops->domain_window_disable(domain, wnd_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) /**
 * report_iommu_fault() - report an IOMMU fault to the IOMMU framework
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) * @domain: the iommu domain where the fault has happened
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * @dev: the device where the fault has happened
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) * @iova: the faulting address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) *
 * This function should be called by the low-level IOMMU implementations
 * whenever an IOMMU fault happens, to allow high-level users that are
 * interested in such events to know about them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) * This event may be useful for several possible use cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) * - mere logging of the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) * - dynamic TLB/PTE loading
 * - restarting the faulting device, if required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) * Returns 0 on success and an appropriate error code otherwise (if dynamic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) * PTE/TLB loading will one day be supported, implementations will be able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) * to tell whether it succeeded or not according to this return value).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) * Specifically, -ENOSYS is returned if a fault handler isn't installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) * (though fault handlers can also return -ENOSYS, in case they want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) * elicit the default behavior of the IOMMU drivers).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) unsigned long iova, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) int ret = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) * if upper layers showed interest and installed a fault handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) * invoke it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) if (domain->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) ret = domain->handler(domain, dev, iova, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) domain->handler_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) trace_io_page_fault(dev, iova, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) EXPORT_SYMBOL_GPL(report_iommu_fault);
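
/*
 * Illustrative sketch (hypothetical handler): the shape of a fault
 * handler consumed by report_iommu_fault() above, installed with
 * iommu_set_fault_handler(domain, example_fault_handler, token).
 * Returning -ENOSYS preserves the IOMMU driver's default behaviour.
 */
static int __maybe_unused example_fault_handler(struct iommu_domain *domain,
						struct device *dev,
						unsigned long iova, int flags,
						void *token)
{
	dev_err_ratelimited(dev, "unhandled %s fault at IOVA %#lx\n",
			    flags & IOMMU_FAULT_WRITE ? "write" : "read",
			    iova);

	/* No dynamic PTE/TLB loading here: defer to the driver default. */
	return -ENOSYS;
}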
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) static int __init iommu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) iommu_group_kset = kset_create_and_add("iommu_groups",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) NULL, kernel_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) BUG_ON(!iommu_group_kset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) iommu_debugfs_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) core_initcall(iommu_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) int iommu_domain_get_attr(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) enum iommu_attr attr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) struct iommu_domain_geometry *geometry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) bool *paging;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) switch (attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) case DOMAIN_ATTR_GEOMETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) geometry = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) *geometry = domain->geometry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) case DOMAIN_ATTR_PAGING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) paging = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) *paging = (domain->pgsize_bitmap != 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (!domain->ops->domain_get_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) ret = domain->ops->domain_get_attr(domain, attr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
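
/*
 * Illustrative sketch (hypothetical caller): querying the two
 * attributes that are handled generically above, without going through
 * a driver's domain_get_attr() callback.
 */
static void __maybe_unused example_query_domain(struct iommu_domain *domain)
{
	struct iommu_domain_geometry geo;
	bool paging;

	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
		pr_info("aperture %pad..%pad\n", &geo.aperture_start,
			&geo.aperture_end);

	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGING, &paging))
		pr_info("paging %ssupported\n", paging ? "" : "not ");
}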
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) int iommu_domain_set_attr(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) enum iommu_attr attr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) switch (attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) if (domain->ops->domain_set_attr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) ret = domain->ops->domain_set_attr(domain, attr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) void iommu_get_resv_regions(struct device *dev, struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) const struct iommu_ops *ops = dev->bus->iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) if (ops && ops->get_resv_regions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) ops->get_resv_regions(dev, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) void iommu_put_resv_regions(struct device *dev, struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) const struct iommu_ops *ops = dev->bus->iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) if (ops && ops->put_resv_regions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) ops->put_resv_regions(dev, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) * generic_iommu_put_resv_regions - Reserved region driver helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) * @dev: device for which to free reserved regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) * @list: reserved region list for device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) * IOMMU drivers can use this to implement their .put_resv_regions() callback
 * for simple reservations. The memory allocated for each reserved region is
 * freed. If an IOMMU driver allocates additional resources per region, it
 * must implement a custom callback instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) struct iommu_resv_region *entry, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) list_for_each_entry_safe(entry, next, list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) EXPORT_SYMBOL(generic_iommu_put_resv_regions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) size_t length, int prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) enum iommu_resv_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) struct iommu_resv_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) region = kzalloc(sizeof(*region), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) if (!region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
	INIT_LIST_HEAD(&region->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) region->start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) region->length = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) region->prot = prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) region->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) return region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
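
/*
 * Illustrative sketch (hypothetical driver callback): how a driver's
 * .get_resv_regions() implementation might publish a fixed MSI window
 * with iommu_alloc_resv_region(). The base address and size are made
 * up for the example.
 */
static void __maybe_unused example_get_resv_regions(struct device *dev,
						    struct list_head *head)
{
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(0x08000000, 0x100000,
					 IOMMU_WRITE | IOMMU_NOEXEC |
					 IOMMU_MMIO, IOMMU_RESV_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}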
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) void iommu_set_default_passthrough(bool cmd_line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) if (cmd_line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) iommu_set_cmd_line_dma_api();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) void iommu_set_default_translated(bool cmd_line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) if (cmd_line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) iommu_set_cmd_line_dma_api();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) iommu_def_domain_type = IOMMU_DOMAIN_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) bool iommu_default_passthrough(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) EXPORT_SYMBOL_GPL(iommu_default_passthrough);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) const struct iommu_ops *ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) struct iommu_device *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) spin_lock(&iommu_device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) list_for_each_entry(iommu, &iommu_device_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) if (iommu->fwnode == fwnode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) ops = iommu->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) spin_unlock(&iommu_device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) return ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) const struct iommu_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) if (fwspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) return ops == fwspec->ops ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) if (!dev_iommu_get(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) /* Preallocate for the overwhelmingly common case of 1 ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (!fwspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) of_node_get(to_of_node(iommu_fwnode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) fwspec->iommu_fwnode = iommu_fwnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) fwspec->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) dev_iommu_fwspec_set(dev, fwspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) EXPORT_SYMBOL_GPL(iommu_fwspec_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) void iommu_fwspec_free(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) if (fwspec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) fwnode_handle_put(fwspec->iommu_fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) kfree(fwspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) dev_iommu_fwspec_set(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) EXPORT_SYMBOL_GPL(iommu_fwspec_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) int i, new_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) if (!fwspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) new_num = fwspec->num_ids + num_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) if (new_num > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) if (!fwspec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) dev_iommu_fwspec_set(dev, fwspec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) for (i = 0; i < num_ids; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) fwspec->ids[fwspec->num_ids + i] = ids[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) fwspec->num_ids = new_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
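
/*
 * Illustrative sketch (hypothetical firmware-parser path): the expected
 * ordering when describing a master to its IOMMU: look up the ops by
 * fwnode, initialize the fwspec, then record the device's IDs.
 */
static int __maybe_unused example_parse_master(struct device *dev,
					       struct fwnode_handle *iommu_fwnode,
					       u32 sid)
{
	const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode);
	int ret;

	if (!ops)
		return -EPROBE_DEFER;	/* IOMMU driver not registered yet */

	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;

	return iommu_fwspec_add_ids(dev, &sid, 1);
}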
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) * Per device IOMMU features.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) const struct iommu_ops *ops = dev->bus->iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) if (ops && ops->dev_has_feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) return ops->dev_has_feat(dev, feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (dev->iommu && dev->iommu->iommu_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) if (ops->dev_enable_feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) return ops->dev_enable_feat(dev, feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) * The device drivers should do the necessary cleanups before calling this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) * For example, before disabling the aux-domain feature, the device driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) * should detach all aux-domains. Otherwise, this will return -EBUSY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) if (dev->iommu && dev->iommu->iommu_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) if (ops->dev_disable_feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) return ops->dev_disable_feat(dev, feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) if (dev->iommu && dev->iommu->iommu_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) if (ops->dev_feat_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) return ops->dev_feat_enabled(dev, feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) * Aux-domain specific attach/detach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) * true. Also, as long as domains are attached to a device through this
 * interface, any attempt to call iommu_attach_device() should fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) * This should make us safe against a device being attached to a guest as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) * whole while there are still pasid users on it (aux and sva).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) if (domain->ops->aux_attach_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) ret = domain->ops->aux_attach_dev(domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) trace_attach_device_to_domain(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) if (domain->ops->aux_detach_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) domain->ops->aux_detach_dev(domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) trace_detach_device_from_domain(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) if (domain->ops->aux_get_pasid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) ret = domain->ops->aux_get_pasid(domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
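
/*
 * Illustrative sketch (hypothetical driver path): the aux-domain
 * lifecycle implied above. Enable the feature, attach an unmanaged
 * domain as an aux domain, then fetch the PASID the device must tag
 * its DMA with.
 */
static int __maybe_unused example_aux_setup(struct iommu_domain *domain,
					    struct device *dev)
{
	int ret, pasid;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		goto err_disable;

	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0) {
		ret = pasid;
		goto err_detach;
	}

	/* ... program pasid into the device ... */
	return 0;

err_detach:
	iommu_aux_detach_device(domain, dev);
err_disable:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
	return ret;
}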
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) * iommu_sva_bind_device() - Bind a process address space to a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) * @dev: the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) * @mm: the mm to bind, caller must hold a reference to it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) * Create a bond between device and address space, allowing the device to access
 * the mm using the returned PASID. If a bond already exists between @dev and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) * @mm, it is returned and an additional reference is taken. Caller must call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) * iommu_sva_unbind_device() to release each reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) * initialize the required SVA features.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) * On error, returns an ERR_PTR value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) struct iommu_sva *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) struct iommu_sva *handle = ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) const struct iommu_ops *ops = dev->bus->iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) if (!ops || !ops->sva_bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) group = iommu_group_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) /* Ensure device count and domain don't change while we're binding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) * To keep things simple, SVA currently doesn't support IOMMU groups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) * with more than one device. Existing SVA-capable systems are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) * affected by the problems that required IOMMU groups (lack of ACS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) * isolation, device ID aliasing and other hardware issues).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) if (iommu_group_device_count(group) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) handle = ops->sva_bind(dev, mm, drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) iommu_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) return handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) * @handle: the handle returned by iommu_sva_bind_device()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) void iommu_sva_unbind_device(struct iommu_sva *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) struct device *dev = handle->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) const struct iommu_ops *ops = dev->bus->iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) if (!ops || !ops->sva_unbind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) group = iommu_group_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) mutex_lock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) ops->sva_unbind(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) mutex_unlock(&group->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) iommu_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) u32 iommu_sva_get_pasid(struct iommu_sva *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) if (!ops || !ops->sva_get_pasid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) return IOMMU_PASID_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) return ops->sva_get_pasid(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
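
/*
 * Illustrative sketch (hypothetical driver path): the SVA lifecycle
 * documented above. Enable the feature, bind an mm, and retrieve the
 * PASID the device must use. The caller holds a reference on @mm.
 */
static int __maybe_unused example_sva_setup(struct device *dev,
					    struct mm_struct *mm,
					    struct iommu_sva **bond)
{
	u32 pasid;
	int ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	*bond = iommu_sva_bind_device(dev, mm, NULL);
	if (IS_ERR(*bond)) {
		ret = PTR_ERR(*bond);
		goto err_disable;
	}

	pasid = iommu_sva_get_pasid(*bond);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(*bond);
		ret = -ENODEV;
		goto err_disable;
	}

	/* ... program pasid into the device ... */
	return 0;

err_disable:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return ret;
}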