// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

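/*
 * Per-tag bookkeeping for Peripheral Page Request (PPR/PRI) handling:
 * 'inflight' counts faults still being processed for this tag, 'finish'
 * records whether the device asked for a completion message and 'status'
 * holds the PPR response code to send back.
 */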
struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	u32 pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

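/*
 * Per-device state created by amd_iommu_init_device(): reference-counted,
 * it owns the IOMMUv2 domain, the multi-level pasid_state table and the
 * callbacks a driver may register for invalid PPRs and context
 * invalidation.
 */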
struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

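/* One queued PPR fault, handed from ppr_notifier() to the iommu_wq worker. */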
struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u32 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);

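/* Build the 16-bit IOMMU device ID (PCI bus number << 8 | devfn). */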
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	struct iommu_group *group;

	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	group = iommu_group_get(&dev_state->pdev->dev);
	if (WARN_ON(!group))
		return;

	iommu_detach_group(dev_state->domain, group);

	iommu_group_put(group);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

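/*
 * Walk the multi-level pasid_state table: each level consumes 9 bits of
 * the PASID (512 slots per page). If 'alloc' is true, missing intermediate
 * levels are allocated with GFP_ATOMIC. Returns a pointer to the leaf slot
 * or NULL.
 */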
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  u32 pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u32 pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   u32 pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

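/*
 * Drop the caller's reference, wait until all remaining references are
 * gone and then free the pasid_state.
 */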
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	atomic_dec(&pasid_state->count);
	wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
	free_pasid_state(pasid_state);
}

static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid, no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

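/*
 * Force-unbind every PASID still bound to this device (via
 * mmu_notifier_unregister(), which ends up in mn_release()) and then free
 * the pasid_state table pages.
 */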
static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else
		BUG_ON(dev_state->pasid_levels != 0);

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

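/*
 * mmu_notifier callback: if the invalidated range fits within a single
 * page, flush just that page from the IOTLB, otherwise flush the whole
 * TLB for this PASID.
 */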
static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.invalidate_range	= mn_invalidate_range,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

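/*
 * Called when one fault for the given tag has been handled. When the last
 * in-flight fault completes and the device requested a response, send the
 * accumulated PPR status and reset the tag state.
 */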
static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

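/*
 * Fault could not be serviced. Let the driver's invalid-PPR callback pick
 * the response code if one is registered, otherwise answer with
 * PPR_INVALID.
 */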
static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}

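/*
 * Translate the PPR fault flags into the VM_* permissions the request
 * needs and check them against the VMA.
 */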
static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
	unsigned long requested = 0;

	if (fault->flags & PPR_FAULT_EXEC)
		requested |= VM_EXEC;

	if (fault->flags & PPR_FAULT_READ)
		requested |= VM_READ;

	if (fault->flags & PPR_FAULT_WRITE)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

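/*
 * Workqueue handler: resolve one PPR fault through handle_mm_fault() under
 * the mmap read lock, then complete the PRI tag and drop the pasid_state
 * reference taken in ppr_notifier().
 */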
static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct vm_area_struct *vma;
	vm_fault_t ret = VM_FAULT_ERROR;
	unsigned int flags = 0;
	struct mm_struct *mm;
	u64 address;

	mm = fault->state->mm;
	address = fault->address;

	if (fault->flags & PPR_FAULT_USER)
		flags |= FAULT_FLAG_USER;
	if (fault->flags & PPR_FAULT_WRITE)
		flags |= FAULT_FLAG_WRITE;
	flags |= FAULT_FLAG_REMOTE;

	mmap_read_lock(mm);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		/* failed to get a vma in the right range */
		goto out;

	/* Check if we have the right permissions on the vma */
	if (access_error(vma, fault))
		goto out;

	ret = handle_mm_fault(vma, address, flags, NULL);
out:
	mmap_read_unlock(mm);

	if (ret & VM_FAULT_ERROR)
		/* failed to service fault */
		handle_fault_error(fault);

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

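/*
 * Notifier called from the IOMMU PPR handling path (may run in atomic
 * context): validate the device and PASID, account the fault against its
 * PRI tag and queue a struct fault (allocated with GFP_ATOMIC) to iommu_wq
 * for processing in do_fault().
 */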
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct pci_dev *pdev = NULL;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag, devid;
	int ret;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	devid = iommu_fault->device_id;
	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (!pdev)
		return -ENODEV;

	ret = NOTIFY_DONE;

	/* In a kdump kernel the PCI device is not initialized yet -> send INVALID */
	if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
		amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out;
	}

	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:

	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

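/*
 * Bind a task's address space to a (device, PASID) pair: take a reference
 * on the device_state, allocate and register a pasid_state (including its
 * mmu_notifier) and program the mm's page-table root into the GCR3 table.
 * The mm reference from get_task_mm() is dropped again before returning;
 * teardown relies on the mmu_notifier release callback.
 */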
int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;


	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);
	mmput(mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

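/*
 * Undo amd_iommu_bind_pasid(): clear the pasid_state slot, unregister the
 * mmu_notifier, wait until all references to the pasid_state are gone and
 * drop both device_state references (this function's and the one taken in
 * amd_iommu_bind_pasid()).
 */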
void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

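/*
 * Set up IOMMUv2 (PRI/PASID) support for a PCI device: allocate a
 * device_state, size the pasid_state table for 'pasids', allocate a
 * direct-mapped v2 domain, attach the device's group to it and publish the
 * device_state on state_list.
 *
 * A typical driver call sequence (a sketch inferred from this API, not
 * spelled out elsewhere in this file) would be: amd_iommu_init_device(),
 * optionally amd_iommu_set_invalid_ppr_cb()/amd_iommu_set_invalidate_ctx_cb(),
 * then amd_iommu_bind_pasid() per context, and later
 * amd_iommu_unbind_pasid() and amd_iommu_free_device().
 */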
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	struct iommu_group *group;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	/*
	 * When memory encryption is active the device is likely not in a
	 * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
	 */
	if (mem_encrypt_active())
		return -ENODEV;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	group = iommu_group_get(&pdev->dev);
	if (!group) {
		ret = -EINVAL;
		goto out_free_domain;
	}

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)
		goto out_drop_group;

	iommu_group_put(group);

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_drop_group:
	iommu_group_put(group);

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

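/*
 * Tear down IOMMUv2 support for a device: unlink its device_state, free
 * all remaining pasid_states and wait for the last reference to go away
 * before freeing the device_state itself.
 */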
void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) amd_iommu_invalid_ppr_cb cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct device_state *dev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) u16 devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (!amd_iommu_v2_supported())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) devid = device_id(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) spin_lock_irqsave(&state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) dev_state = __get_device_state(devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (dev_state == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) dev_state->inv_ppr_cb = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) spin_unlock_irqrestore(&state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
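/*
 * Hedged sketch of an inv_ppr_cb handler: the callback signature and the
 * AMD_IOMMU_INV_PRI_RSP_* return codes come from include/linux/amd-iommu.h
 * and are consumed by the PPR fault path in this file; the policy below
 * (log and fail every unhandled fault) and the function name are purely
 * illustrative.
 *
 *	static int example_inv_ppr_cb(struct pci_dev *pdev, u32 pasid,
 *				      unsigned long address, u16 flags)
 *	{
 *		dev_warn(&pdev->dev,
 *			 "unhandled PPR fault: pasid %u addr 0x%lx flags 0x%x\n",
 *			 pasid, address, flags);
 *		return AMD_IOMMU_INV_PRI_RSP_FAIL;
 *	}
 *
 * Registered with amd_iommu_set_invalid_ppr_cb(pdev, example_inv_ppr_cb);
 * passing a NULL callback clears it again.
 */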
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) amd_iommu_invalidate_ctx cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct device_state *dev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) u16 devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (!amd_iommu_v2_supported())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) devid = device_id(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) spin_lock_irqsave(&state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) dev_state = __get_device_state(devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (dev_state == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) dev_state->inv_ctx_cb = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) spin_unlock_irqrestore(&state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
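/*
 * Hedged sketch of an inv_ctx_cb handler: it is invoked from mn_release()
 * when the mm behind a bound PASID goes away, so the driver can stop issuing
 * new work for that PASID.  The signature matches the typedef in
 * include/linux/amd-iommu.h; the function name is made up.
 *
 *	static void example_inv_ctx_cb(struct pci_dev *pdev, u32 pasid)
 *	{
 *		dev_dbg(&pdev->dev, "context for pasid %u is gone\n", pasid);
 *		// stop queueing device work that references this PASID
 *	}
 *
 * Registered with amd_iommu_set_invalidate_ctx_cb(pdev, example_inv_ctx_cb).
 */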
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static int __init amd_iommu_v2_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (!amd_iommu_v2_supported()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * Load anyway to provide the symbols to other modules
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * which may use AMD IOMMUv2 optionally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) spin_lock_init(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (iommu_wq == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) amd_iommu_register_ppr_notifier(&ppr_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) pr_info("AMD IOMMUv2 loaded and initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static void __exit amd_iommu_v2_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct device_state *dev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (!amd_iommu_v2_supported())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) amd_iommu_unregister_ppr_notifier(&ppr_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) flush_workqueue(iommu_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) 	 * The loop below might call flush_workqueue() again, so only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) 	 * call destroy_workqueue() once the loop has finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) for (i = 0; i < MAX_DEVICES; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) dev_state = get_device_state(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (dev_state == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
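		/*
		 * Any device_state still registered here means a driver
		 * exited without calling amd_iommu_free_device(); warn
		 * once and clean it up on its behalf.
		 */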
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) put_device_state(dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) amd_iommu_free_device(dev_state->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) destroy_workqueue(iommu_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) module_init(amd_iommu_v2_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) module_exit(amd_iommu_v2_exit);