^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2013 Freescale Semiconductor, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Author: Varun Sethi <varun.sethi@freescale.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "fsl_pamu_domain.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <sysdev/fsl_pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * Global spinlock that needs to be held while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * configuring PAMU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) static DEFINE_SPINLOCK(iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) static struct kmem_cache *fsl_pamu_domain_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) static struct kmem_cache *iommu_devinfo_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) static DEFINE_SPINLOCK(device_domain_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) struct iommu_device pamu_iommu; /* IOMMU core code handle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) return container_of(dom, struct fsl_dma_domain, iommu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) static int __init iommu_init_mempool(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) sizeof(struct fsl_dma_domain),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) SLAB_HWCACHE_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) if (!fsl_pamu_domain_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) pr_debug("Couldn't create fsl iommu_domain cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) sizeof(struct device_domain_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) SLAB_HWCACHE_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) if (!iommu_devinfo_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) pr_debug("Couldn't create devinfo cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) kmem_cache_destroy(fsl_pamu_domain_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) u32 win_cnt = dma_domain->win_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) struct dma_window *win_ptr = &dma_domain->win_arr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) struct iommu_domain_geometry *geom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) geom = &dma_domain->iommu_domain.geometry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) if (!win_cnt || !dma_domain->geom_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) pr_debug("Number of windows/geometry not configured for the domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) if (win_cnt > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) u64 subwin_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) dma_addr_t subwin_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) u32 wnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) subwin_iova = iova & ~(subwin_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) win_ptr = &dma_domain->win_arr[wnd];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) if (win_ptr->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) return win_ptr->paddr + (iova & (win_ptr->size - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) unsigned long rpn, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) for (i = 0; i < dma_domain->win_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) if (sub_win_ptr[i].valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) spin_lock_irqsave(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) sub_win_ptr[i].size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) ~(u32)0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) rpn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) dma_domain->snoop_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) dma_domain->stash_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) (i > 0) ? 1 : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) sub_win_ptr[i].prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) spin_unlock_irqrestore(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) pr_debug("SPAACE configuration failed for liodn %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) struct dma_window *wnd = &dma_domain->win_arr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) spin_lock_irqsave(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) ret = pamu_config_ppaace(liodn, wnd_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) wnd->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) ~(u32)0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) wnd->paddr >> PAMU_PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) dma_domain->snoop_id, dma_domain->stash_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 0, wnd->prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) spin_unlock_irqrestore(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) pr_debug("PAACE configuration failed for liodn %d\n", liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) /* Map the DMA window corresponding to the LIODN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) if (dma_domain->win_cnt > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return map_subwins(liodn, dma_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) return map_win(liodn, dma_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) /* Update window/subwindow mapping for the LIODN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) spin_lock_irqsave(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) if (dma_domain->win_cnt > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) wnd->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) ~(u32)0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) wnd->paddr >> PAMU_PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) dma_domain->snoop_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) dma_domain->stash_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) (wnd_nr > 0) ? 1 : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) wnd->prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) pr_debug("Subwindow reconfiguration failed for liodn %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) phys_addr_t wnd_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) ret = pamu_config_ppaace(liodn, wnd_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) wnd->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) ~(u32)0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) wnd->paddr >> PAMU_PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) dma_domain->snoop_id, dma_domain->stash_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 0, wnd->prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) pr_debug("Window reconfiguration failed for liodn %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) spin_unlock_irqrestore(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) spin_lock_irqsave(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) if (!dma_domain->win_arr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) spin_unlock_irqrestore(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) for (i = 0; i < dma_domain->win_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) ret = pamu_update_paace_stash(liodn, i, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) i, liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) spin_unlock_irqrestore(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) spin_unlock_irqrestore(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) /* Set the geometry parameters for a LIODN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) static int pamu_set_liodn(int liodn, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) struct fsl_dma_domain *dma_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) struct iommu_domain_geometry *geom_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) u32 win_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) phys_addr_t window_addr, window_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) phys_addr_t subwin_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) u32 omi_index = ~(u32)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) * Configure the omi_index at the geometry setup time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) * This is a static value which depends on the type of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * device and would not change thereafter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) get_ome_index(&omi_index, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) window_addr = geom_attr->aperture_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) window_size = dma_domain->geom_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) spin_lock_irqsave(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) ret = pamu_disable_liodn(liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 0, dma_domain->snoop_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) dma_domain->stash_id, win_cnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) spin_unlock_irqrestore(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) liodn, win_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if (win_cnt > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) subwin_size = window_size >> ilog2(win_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) for (i = 0; i < win_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) spin_lock_irqsave(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) ret = pamu_disable_spaace(liodn, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) ret = pamu_config_spaace(liodn, win_cnt, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) subwin_size, omi_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 0, dma_domain->snoop_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) dma_domain->stash_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) spin_unlock_irqrestore(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) pr_debug("SPAACE configuration failed for liodn %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) static int check_size(u64 size, dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) * Size must be a power of two and at least be equal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) * to PAMU page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) pr_debug("Size too small or not a power of two\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) /* iova must be page size aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) if (iova & (size - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) pr_debug("Address is not aligned with window size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) struct fsl_dma_domain *domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) if (!domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) domain->stash_id = ~(u32)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) domain->snoop_id = ~(u32)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) domain->win_cnt = pamu_get_max_subwin_cnt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) domain->geom_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) INIT_LIST_HEAD(&domain->devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) spin_lock_init(&domain->domain_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) return domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) list_del(&info->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) spin_lock_irqsave(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) if (win_cnt > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) pamu_free_subwins(info->liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) pamu_disable_liodn(info->liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) spin_unlock_irqrestore(&iommu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) spin_lock_irqsave(&device_domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) dev_iommu_priv_set(info->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) kmem_cache_free(iommu_devinfo_cache, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) spin_unlock_irqrestore(&device_domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) struct device_domain_info *info, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) spin_lock_irqsave(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) /* Remove the device from the domain device list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) if (!dev || (info->dev == dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) remove_device_ref(info, dma_domain->win_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) struct device_domain_info *info, *old_domain_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) spin_lock_irqsave(&device_domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * Check here if the device is already attached to domain or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * If the device is already attached to a domain detach it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) old_domain_info = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) if (old_domain_info && old_domain_info->domain != dma_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) spin_unlock_irqrestore(&device_domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) detach_device(dev, old_domain_info->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) spin_lock_irqsave(&device_domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) info->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) info->liodn = liodn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) info->domain = dma_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) list_add(&info->link, &dma_domain->devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) * In case of devices with multiple LIODNs just store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * the info for the first LIODN as all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * LIODNs share the same domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if (!dev_iommu_priv_get(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) dev_iommu_priv_set(dev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) spin_unlock_irqrestore(&device_domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) if (iova < domain->geometry.aperture_start ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) iova > domain->geometry.aperture_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) return get_phys_addr(dma_domain, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) static bool fsl_pamu_capable(enum iommu_cap cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) return cap == IOMMU_CAP_CACHE_COHERENCY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) static void fsl_pamu_domain_free(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) /* remove all the devices from the device list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) detach_device(NULL, dma_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) dma_domain->enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) dma_domain->mapped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) struct fsl_dma_domain *dma_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) if (type != IOMMU_DOMAIN_UNMANAGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) dma_domain = iommu_alloc_dma_domain();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) if (!dma_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) pr_debug("dma_domain allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) /* defaul geometry 64 GB i.e. maximum system address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) dma_domain->iommu_domain. geometry.aperture_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) dma_domain->iommu_domain.geometry.force_aperture = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) return &dma_domain->iommu_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) /* Configure geometry settings for all LIODNs associated with domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) struct iommu_domain_geometry *geom_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) u32 win_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) struct device_domain_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) list_for_each_entry(info, &dma_domain->devices, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) geom_attr, win_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) /* Update stash destination for all LIODNs associated with the domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) struct device_domain_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) list_for_each_entry(info, &dma_domain->devices, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) ret = update_liodn_stash(info->liodn, dma_domain, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) /* Update domain mappings for all LIODNs associated with the domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) struct device_domain_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) list_for_each_entry(info, &dma_domain->devices, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) ret = update_liodn(info->liodn, dma_domain, wnd_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) struct device_domain_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) list_for_each_entry(info, &dma_domain->devices, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) ret = pamu_disable_liodn(info->liodn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) dma_domain->enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) ret = pamu_disable_spaace(info->liodn, wnd_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) spin_lock_irqsave(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) if (!dma_domain->win_arr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) pr_debug("Number of windows not configured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (wnd_nr >= dma_domain->win_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) pr_debug("Invalid window index\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) if (dma_domain->win_arr[wnd_nr].valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) ret = disable_domain_win(dma_domain, wnd_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) dma_domain->win_arr[wnd_nr].valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) dma_domain->mapped--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) phys_addr_t paddr, u64 size, int prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) struct dma_window *wnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) int pamu_prot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) u64 win_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) if (prot & IOMMU_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) pamu_prot |= PAACE_AP_PERMS_QUERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) if (prot & IOMMU_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) pamu_prot |= PAACE_AP_PERMS_UPDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) spin_lock_irqsave(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) if (!dma_domain->win_arr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) pr_debug("Number of windows not configured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) if (wnd_nr >= dma_domain->win_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) pr_debug("Invalid window index\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (size > win_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) pr_debug("Invalid window size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if (dma_domain->win_cnt == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (dma_domain->enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) pr_debug("Disable the window before updating the mapping\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) ret = check_size(size, domain->geometry.aperture_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) pr_debug("Aperture start not aligned to the size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) wnd = &dma_domain->win_arr[wnd_nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) if (!wnd->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) wnd->paddr = paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) wnd->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) wnd->prot = pamu_prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) ret = update_domain_mapping(dma_domain, wnd_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) wnd->valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) dma_domain->mapped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) pr_debug("Disable the window before updating the mapping\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 *
 * Walks the num LIODNs in liodn[], attaching each one to the domain
 * and, if the domain already has its windows configured (win_arr set),
 * programming the domain geometry and any existing window mappings
 * into the PAMU for that LIODN.
 *
 * NOTE(review): on a mid-loop failure the function returns the error
 * but does NOT detach the LIODNs attached in earlier iterations —
 * callers appear to rely on a later detach for cleanup; confirm.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = &dma_domain->iommu_domain;
	int ret = 0;
	int i;

	/* Whole attach sequence runs under the per-domain lock */
	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			/*
			 * A win_cnt of 0 is passed for the single-window
			 * case — presumably it tells pamu_set_liodn() not
			 * to use subwindows; confirm against its definition.
			 */
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry, win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) static int fsl_pamu_attach_device(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) const u32 *liodn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) u32 liodn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) int len, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) struct pci_dev *pdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) struct pci_controller *pci_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * Use LIODN of the PCI controller while attaching a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * PCI device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (dev_is_pci(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) pci_ctl = pci_bus_to_host(pdev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * make dev point to pci controller device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) * so we can get the LIODN programmed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * u-boot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) dev = pci_ctl->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) if (liodn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) liodn_cnt = len / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) static void fsl_pamu_detach_device(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) const u32 *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) struct pci_dev *pdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct pci_controller *pci_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * Use LIODN of the PCI controller while detaching a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * PCI device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) if (dev_is_pci(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) pci_ctl = pci_bus_to_host(pdev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) * make dev point to pci controller device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * so we can get the LIODN programmed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * u-boot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) dev = pci_ctl->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) prop = of_get_property(dev->of_node, "fsl,liodn", &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (prop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) detach_device(dev, dma_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) static int configure_domain_geometry(struct iommu_domain *domain, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) struct iommu_domain_geometry *geom_attr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) dma_addr_t geom_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * Sanity check the geometry size. Also, we do not support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * DMA outside of the geometry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (check_size(geom_size, geom_attr->aperture_start) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) !geom_attr->force_aperture) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) pr_debug("Invalid PAMU geometry attributes\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) spin_lock_irqsave(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (dma_domain->enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) pr_debug("Can't set geometry attributes as domain is active\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) /* Copy the domain geometry information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) memcpy(&domain->geometry, geom_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) sizeof(struct iommu_domain_geometry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) dma_domain->geom_size = geom_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /* Set the domain stash attribute */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) struct pamu_stash_attribute *stash_attr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) spin_lock_irqsave(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) memcpy(&dma_domain->dma_stash, stash_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) sizeof(struct pamu_stash_attribute));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) dma_domain->stash_id = get_stash_id(stash_attr->cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) stash_attr->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (dma_domain->stash_id == ~(u32)0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) pr_debug("Invalid stash attributes\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) ret = update_domain_stash(dma_domain, dma_domain->stash_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	/* A domain with no mapped windows must not be enabled */
	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	/*
	 * Record the new state up front, then apply it to each attached
	 * LIODN.  This is best-effort: a per-LIODN failure is only
	 * logged and the function still returns 0, so dma_domain->enabled
	 * can disagree with the hardware state of an individual LIODN.
	 */
	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		if (ret)
			pr_debug("Unable to set dma state for liodn %d",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
/*
 * Set the number of DMA windows for the domain.
 *
 * Requires the domain to be inactive and its geometry already set.
 * w_count must be a power of two no larger than the PAMU's maximum
 * subwindow count.  On success (re)allocates the window bookkeeping
 * array and records the new count.
 */
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Ensure domain is inactive i.e. DMA should be disabled for the domain */
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Ensure that the geometry has been set for the domain */
	if (!dma_domain->geom_size) {
		pr_debug("Please configure geometry before setting the number of windows\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/*
	 * Ensure we have valid window count i.e. it should be less than
	 * maximum permissible limit and should be a power of two.
	 */
	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
		pr_debug("Invalid window count\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/*
	 * Program the hardware first (win_cnt of 0 selects the
	 * single-window case), then allocate the bookkeeping array.
	 * NOTE(review): if kcalloc fails the PAMU has already been
	 * reprogrammed while win_arr is left NULL and win_cnt keeps its
	 * old value; window_enable/disable bail on the NULL win_arr, so
	 * this appears benign — confirm.
	 */
	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
				       w_count > 1 ? w_count : 0);
	if (!ret) {
		kfree(dma_domain->win_arr);
		dma_domain->win_arr = kcalloc(w_count,
					      sizeof(*dma_domain->win_arr),
					      GFP_ATOMIC);
		if (!dma_domain->win_arr) {
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -ENOMEM;
		}
		dma_domain->win_cnt = w_count;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) enum iommu_attr attr_type, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) switch (attr_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) case DOMAIN_ATTR_GEOMETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ret = configure_domain_geometry(domain, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) case DOMAIN_ATTR_FSL_PAMU_STASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) ret = configure_domain_stash(dma_domain, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) case DOMAIN_ATTR_FSL_PAMU_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ret = configure_domain_dma_state(dma_domain, *(int *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) case DOMAIN_ATTR_WINDOWS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ret = fsl_pamu_set_windows(domain, *(u32 *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pr_debug("Unsupported attribute type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) enum iommu_attr attr_type, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) switch (attr_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) case DOMAIN_ATTR_FSL_PAMU_STASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) memcpy(data, &dma_domain->dma_stash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) sizeof(struct pamu_stash_attribute));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) case DOMAIN_ATTR_FSL_PAMU_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) *(int *)data = dma_domain->enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) case DOMAIN_ATTR_FSL_PAMUV1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) case DOMAIN_ATTR_WINDOWS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) *(u32 *)data = dma_domain->win_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) pr_debug("Unsupported attribute type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
/*
 * Return the device's existing iommu group, or allocate a fresh one if
 * the device has none yet.
 */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	return group ? group : iommu_group_alloc();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) u32 version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /* Check the PCI controller version number by readding BRR1 register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) version &= PCI_FSL_BRR1_VER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /* If PCI controller version is >= 0x204 we can partition endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return version >= 0x204;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* Get iommu group information from peer devices or devices on the parent bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct pci_dev *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct pci_bus *bus = pdev->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * Traverese the pci bus device list to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * the shared iommu group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) while (bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) list_for_each_entry(tmp, &bus->devices, bus_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (tmp == pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) group = iommu_group_get(&tmp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) bus = bus->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct pci_controller *pci_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) bool pci_endpt_partitioning;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) struct iommu_group *group = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) pci_ctl = pci_bus_to_host(pdev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /* We can partition PCIe devices so assign device group to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (pci_endpt_partitioning) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) group = pci_device_group(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * PCIe controller is not a paritionable entity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * free the controller device iommu_group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (pci_ctl->parent->iommu_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) iommu_group_remove_device(pci_ctl->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * All devices connected to the controller will share the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * PCI controllers device group. If this is the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * device to be probed for the pci controller, copy the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * device group information from the PCI controller device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * node and remove the PCI controller iommu group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * For subsequent devices, the iommu group information can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * be obtained from sibling devices (i.e. from the bus_devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * link list).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (pci_ctl->parent->iommu_group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) group = get_device_iommu_group(pci_ctl->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) iommu_group_remove_device(pci_ctl->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) group = get_shared_pci_device_group(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) group = ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static struct iommu_group *fsl_pamu_device_group(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct iommu_group *group = ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * For platform devices we allocate a separate group for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * each of the devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (dev_is_pci(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) group = get_pci_device_group(to_pci_dev(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) else if (of_get_property(dev->of_node, "fsl,liodn", &len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) group = get_device_iommu_group(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return &pamu_iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static void fsl_pamu_release_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static const struct iommu_ops fsl_pamu_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) .capable = fsl_pamu_capable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) .domain_alloc = fsl_pamu_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) .domain_free = fsl_pamu_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) .attach_dev = fsl_pamu_attach_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) .detach_dev = fsl_pamu_detach_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) .domain_window_enable = fsl_pamu_window_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) .domain_window_disable = fsl_pamu_window_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) .iova_to_phys = fsl_pamu_iova_to_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) .domain_set_attr = fsl_pamu_set_domain_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) .domain_get_attr = fsl_pamu_get_domain_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) .probe_device = fsl_pamu_probe_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) .release_device = fsl_pamu_release_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) .device_group = fsl_pamu_device_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) int __init pamu_domain_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) ret = iommu_init_mempool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) ret = iommu_device_register(&pamu_iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) iommu_device_sysfs_remove(&pamu_iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) pr_err("Can't register iommu device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }