// SPDX-License-Identifier: GPL-2.0
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <linux/dma-map-ops.h>
#include <asm/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"
#include "kernel.h"

#include "pci_sun4v.h"

#define DRIVER_NAME "pci_sun4v"
#define PFX DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};

/* Ordered from largest major to lowest */
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device *dev;	/* Device mapping is for. */
	unsigned long prot;	/* IOMMU page protections */
	unsigned long entry;	/* Index into IOTSB. */
	u64 *pglist;		/* List of physical pages */
	unsigned long npages;	/* Number of pages in list. */
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

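/* Mappings are batched per cpu: physical page addresses are collected in a
 * per-cpu page list and pushed to the hypervisor in bulk by
 * iommu_batch_flush(), which amortizes the cost of the map calls.
 */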
/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev = dev;
	p->prot = prot;
	p->entry = entry;
	p->npages = 0;
}

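/* Use the ATU (and its IOTSB) only when one is present and the DMA mask
 * asks for addresses above 32 bits; otherwise stay on the legacy IOMMU.
 */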
static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
	return iommu->atu && mask > DMA_BIT_MASK(32);
}

/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	u64 *pglist = p->pglist;
	u64 index_count;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	unsigned long npages = p->npages;
	unsigned long iotsb_num;
	unsigned long ret;
	long num;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		if (!iommu_use_atu(pbm->iommu, mask)) {
			num = pci_sun4v_iommu_map(devhandle,
						  HV_PCI_TSBID(0, entry),
						  npages,
						  prot,
						  __pa(pglist));
			if (unlikely(num < 0)) {
				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle,
						   HV_PCI_TSBID(0, entry),
						   npages, prot, __pa(pglist),
						   num);
				return -1;
			}
		} else {
			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
			ret = pci_sun4v_iotsb_map(devhandle,
						  iotsb_num,
						  index_count,
						  prot,
						  __pa(pglist),
						  &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle, iotsb_num,
						   index_count, prot,
						   __pa(pglist), ret);
				return -1;
			}
		}
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p, mask);
	p->entry = entry;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p, mask);

	return 0;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_end(u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p, mask);
}

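/* Allocate a physically contiguous, zeroed buffer and map it through the
 * legacy IOMMU or the ATU, depending on the device's coherent DMA mask.
 */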
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	u64 mask;
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct iommu_map_table *tbl;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;
	mask = dev->coherent_dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &iommu->atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

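/* Bind every endpoint device below @bus_dev to the shared IOTSB, recursing
 * through bridges (the bridges themselves need no binding).
 */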
unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				unsigned long iotsb_num,
				struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridge */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If bind fails for one device it is going to fail
			 * for the rest of the devices as well, because they
			 * all share the same IOTSB. So in case of failure
			 * simply return the error.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}

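/* Tear down @npages mappings starting at @entry, using the legacy IOMMU
 * demap call for 32-bit DMA addresses and the ATU IOTSB demap otherwise.
 */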
static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
			       dma_addr_t dvma, unsigned long iotsb_num,
			       unsigned long entry, unsigned long npages)
{
	unsigned long num, flags;
	unsigned long ret;

	local_irq_save(flags);
	do {
		if (dvma <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
		} else {
			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
						    entry, npages, &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
						   ret);
			}
		}
		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long order, npages, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	if (!iommu_use_atu(iommu, dvma)) {
		tbl = &iommu->tbl;
		iotsb_num = 0; /* we don't care for legacy iommu */
	} else {
		tbl = &atu->tbl;
		iotsb_num = atu->iotsb->iotsb_num;
	}
	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	unsigned long prot;
	dma_addr_t bus_addr, ret;
	long entry;

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr, mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_MAPPING_ERROR;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long npages;
	unsigned long iotsb_num;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	if (bus_addr <= DMA_BIT_MASK(32)) {
		iotsb_num = 0; /* we don't care for legacy iommu */
		tbl = &iommu->tbl;
	} else {
		iotsb_num = atu->iotsb->iotsb_num;
		tbl = &atu->tbl;
	}
	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

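/* Map a scatterlist: allocate IOVA space per segment, batch the page
 * mappings to the hypervisor, and merge segments that end up contiguous in
 * DMA space (subject to the device's max segment size and boundary).
 */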
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;
	atu = iommu->atu;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_address = DMA_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	struct atu *atu;
	unsigned long flags, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		if (dma_handle <= DMA_BIT_MASK(32)) {
			iotsb_num = 0; /* we don't care for legacy iommu */
			tbl = &iommu->tbl;
		} else {
			iotsb_num = atu->iotsb->iotsb_num;
			tbl = &atu->tbl;
		}
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
				   entry, npages);
		iommu_tbl_range_free(tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

static int dma_4v_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;
	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

static const struct dma_map_ops sun4v_dma_ops = {
	.alloc = dma_4v_alloc_coherent,
	.free = dma_4v_free_coherent,
	.map_page = dma_4v_map_page,
	.unmap_page = dma_4v_unmap_page,
	.map_sg = dma_4v_map_sg,
	.unmap_sg = dma_4v_unmap_sg,
	.dma_supported = dma_4v_supported,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

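/* Scan the IOMMU TSB for translations that are already present (e.g. left
 * behind by firmware). Entries pointing at pages in the kernel's available
 * physical memory are demapped; the rest are kept and marked in-use in the
 * allocation map. Returns the number of entries preserved.
 */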
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0,
							      i), 1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

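/* Allocate the ATU IOTSB (one 8-byte IOTTE per IO page of the ATU range),
 * register it with the hypervisor, and bind all devices on the root bus
 * to it.
 */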
static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* configure and register IOTSB with HV */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	unsigned long err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

	/* There are 4 iommu-address-ranges supported. Each range is a pair of
	 * {base, size}. ranges[0] and ranges[1] are 32-bit address space,
	 * while ranges[2] and ranges[3] are 64-bit space. We want to use the
	 * 64-bit address ranges to support 64-bit addressing. Because the
	 * 'size' of ranges[2] and ranges[3] is the same, we can select either
	 * of them for mapping. However, since that size is too large for the
	 * OS to allocate an IOTSB for, we use a fixed size of 32G
	 * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe devices
	 * to share.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) atu->ranges = (struct atu_ranges *)ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) atu->base = atu->ranges[3].base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) atu->size = ATU_64_SPACE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /* Create IOTSB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) err = pci_sun4v_atu_alloc_iotsb(pbm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) pr_err(PFX "Error creating ATU IOTSB\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* Create the ATU iommu map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * One bit in the map represents one IOTTE in the IOTSB table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) num_iotte = atu->size / IO_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) map_size = num_iotte / 8;
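/* For example, with the 32G ATU space and the usual 8K IO_PAGE_SIZE on
 * sparc64, this works out to 32G / 8K = 4M IOTTEs, i.e. a 512K
 * allocation bitmap (one bit per IOTTE).
 */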
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) atu->tbl.table_map_base = atu->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) atu->dma_addr_mask = dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (!atu->tbl.map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) NULL, false /* no large_pool */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 0 /* default npools */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) false /* want span boundary checking */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
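/* Initialize the legacy (32-bit) IOMMU for this PBM: take the DMA
 * window from the "virtual-dma" property (falling back to a 2G window
 * at 0x80000000), build the allocation bitmap and pool, and import any
 * TSB entries already mapped by OBP so they are not reused.
 */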
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct iommu *iommu = pbm->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) unsigned long num_tsb_entries, sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u32 dma_mask, dma_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) const u32 *vdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (!vdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) vdma = vdma_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) vdma[0], vdma[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
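/* With the default 2G window and 8K IO pages this gives 256K TSB
 * entries and a 32K allocation bitmap.
 */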
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) dma_offset = vdma[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /* Setup initial software IOMMU state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) spin_lock_init(&iommu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) iommu->ctx_lowest_free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) iommu->tbl.table_map_base = dma_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) iommu->dma_addr_mask = dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* Allocate and initialize the free area map. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) sz = (num_tsb_entries + 7) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) sz = (sz + 7UL) & ~7UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (!iommu->tbl.map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) printk(KERN_ERR PFX "Error, kzalloc(iommu->tbl.map) failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) NULL, false /* no large_pool */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 0 /* default npools */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) false /* want span boundary checking */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) sz = probe_existing_entries(pbm, &iommu->tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) printk(KERN_INFO "%s: Imported %lu TSB entries from OBP\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) pbm->name, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) #ifdef CONFIG_PCI_MSI
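/* Layout of one entry in a sun4v MSI event queue (MSIQ). One record is
 * queued per incoming MSI/MSI-X, PCIe message or INTx event; the field
 * layout below follows the sun4v hypervisor interface.
 */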
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct pci_sun4v_msiq_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) u64 version_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) #define MSIQ_VERSION_MASK 0xffffffff00000000UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) #define MSIQ_VERSION_SHIFT 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) #define MSIQ_TYPE_MASK 0x00000000000000ffUL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) #define MSIQ_TYPE_SHIFT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) #define MSIQ_TYPE_NONE 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) #define MSIQ_TYPE_MSG 0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) #define MSIQ_TYPE_MSI32 0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) #define MSIQ_TYPE_MSI64 0x03
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) #define MSIQ_TYPE_INTX 0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) #define MSIQ_TYPE_NONE2 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) u64 intx_sysino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) u64 reserved1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) u64 stick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) u64 req_id; /* bus/device/func */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) #define MSIQ_REQID_BUS_MASK 0xff00UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) #define MSIQ_REQID_BUS_SHIFT 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) #define MSIQ_REQID_DEVICE_MASK 0x00f8UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) #define MSIQ_REQID_DEVICE_SHIFT 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) #define MSIQ_REQID_FUNC_MASK 0x0007UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) #define MSIQ_REQID_FUNC_SHIFT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) u64 msi_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /* The format of this value is message type dependent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * For MSI, bits 15:0 are the data from the MSI packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * For MSI-X, bits 31:0 are the data from the MSI packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * For MSG, this holds the message code and message routing code, where:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * bits 39:32 are the bus/device/fn of the msg target-id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * bits 18:16 are the message routing code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * bits 7:0 are the message code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * For INTx the low order 2-bits are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * 00 - INTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * 01 - INTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * 10 - INTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * 11 - INTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) u64 msi_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) u64 reserved2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
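/* Read the current head offset of event queue 'msiqid' from the
 * hypervisor. The head is a byte offset into the queue, so reject
 * anything beyond the queue size.
 */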
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) unsigned long *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) unsigned long err, limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (unlikely(*head >= limit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
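/* Process the MSIQ entry at *head. Returns 1 and advances *head (with
 * wrap-around) if an MSI32/MSI64 entry was consumed, 0 if the entry is
 * empty, or a negative errno on failure.
 */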
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) unsigned long msiqid, unsigned long *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) unsigned long *msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct pci_sun4v_msiq_entry *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) unsigned long err, type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* Note: void pointer arithmetic, 'head' is a byte offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) (pbm->msiq_ent_count *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) sizeof(struct pci_sun4v_msiq_entry))) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) *head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (unlikely(type != MSIQ_TYPE_MSI32 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) type != MSIQ_TYPE_MSI64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) *msi = ep->msi_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) err = pci_sun4v_msi_setstate(pbm->devhandle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) ep->msi_data /* msi_num */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) HV_MSISTATE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /* Clear the entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ep->version_type &= ~MSIQ_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) (*head) += sizeof(struct pci_sun4v_msiq_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (*head >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) *head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) unsigned long head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) unsigned long err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
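/* Bind MSI number 'msi' to event queue 'msiqid', mark it idle and
 * enable it via the hypervisor.
 */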
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) unsigned long msi, int is_msi64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) (is_msi64 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
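/* Disable an MSI: look up the queue it is bound to as a sanity check,
 * then mark the MSI invalid with the hypervisor.
 */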
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) unsigned long err, msiqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
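/* Allocate one physically contiguous block of pages backing all of
 * this PBM's MSI event queues and register each queue with the
 * hypervisor, reading the configuration back to verify it took effect.
 */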
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) unsigned long q_size, alloc_size, pages, order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) alloc_size = (pbm->msiq_num * q_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) order = get_order(alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (pages == 0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) printk(KERN_ERR "MSI: Cannot allocate MSI queues (order=%lu).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) memset((char *)pages, 0, PAGE_SIZE << order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) pbm->msi_queues = (void *) pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) for (i = 0; i < pbm->msiq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) unsigned long err, base = __pa(pages + (i * q_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) unsigned long ret1, ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) err = pci_sun4v_msiq_conf(pbm->devhandle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) pbm->msiq_first + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) base, pbm->msiq_ent_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) goto h_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) err = pci_sun4v_msiq_info(pbm->devhandle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) pbm->msiq_first + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) &ret1, &ret2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) goto h_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (ret1 != base || ret2 != pbm->msiq_ent_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) printk(KERN_ERR "MSI: Bogus qconf "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) "expected[%lx:%x] got[%lx:%lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) base, pbm->msiq_ent_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) ret1, ret2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) goto h_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) h_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) free_pages(pages, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
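/* Unconfigure every event queue with the hypervisor and release the
 * pages backing them.
 */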
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) unsigned long q_size, alloc_size, pages, order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) for (i = 0; i < pbm->msiq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) unsigned long msiqid = pbm->msiq_first + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) alloc_size = (pbm->msiq_num * q_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) order = get_order(alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) pages = (unsigned long) pbm->msi_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) free_pages(pages, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) pbm->msi_queues = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
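/* Build a Linux IRQ for the device interrupt number backing this
 * event queue, then mark the queue valid and idle so it can start
 * receiving events.
 */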
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unsigned long msiqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) unsigned long devino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (!irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) .get_head = pci_sun4v_get_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .dequeue_msi = pci_sun4v_dequeue_msi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) .set_head = pci_sun4v_set_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) .msi_setup = pci_sun4v_msi_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) .msi_teardown = pci_sun4v_msi_teardown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) .msiq_alloc = pci_sun4v_msiq_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .msiq_free = pci_sun4v_msiq_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) .msiq_build_irq = pci_sun4v_msiq_build_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) #else /* CONFIG_PCI_MSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) #endif /* !(CONFIG_PCI_MSI) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
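/* Per-PBM initialization: record basic properties, discover the MEM/IO
 * spaces, set up the IOMMU and MSI support, scan the PCI bus and, if
 * the ATU hypervisor group is available, initialize the ATU for 64-bit
 * DMA (failure there only disables the ATU, see below).
 */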
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct platform_device *op, u32 devhandle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct device_node *dp = op->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) pbm->numa_node = of_node_to_nid(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) pbm->pci_ops = &sun4v_pci_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) pbm->config_space_reg_bits = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) pbm->index = pci_num_pbms++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) pbm->op = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) pbm->devhandle = devhandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) pbm->name = dp->full_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) printk(KERN_INFO "%s: SUN4V PCI Bus Module\n", pbm->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) printk(KERN_INFO "%s: On NUMA node %d\n", pbm->name, pbm->numa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) pci_determine_mem_io_space(pbm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) pci_get_pbm_props(pbm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) err = pci_sun4v_iommu_init(pbm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) pci_sun4v_msi_init(pbm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) pci_sun4v_scan_bus(pbm, &op->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /* If atu_init fails it is not a complete failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * we can still continue using the legacy iommu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (pbm->iommu->atu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) err = pci_sun4v_atu_init(pbm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) kfree(pbm->iommu->atu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) pbm->iommu->atu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) pr_err(PFX "ATU init failed, err=%d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) pbm->next = pci_pbm_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) pci_pbm_root = pbm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
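/* Probe one "pci" node: negotiate the PCI (and optionally ATU)
 * hypervisor API groups once, derive the device handle from the "reg"
 * property, allocate the per-cpu IOMMU batch page lists on first use,
 * then allocate and initialize the pci_pbm_info for this node.
 */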
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int pci_sun4v_probe(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) const struct linux_prom64_registers *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static int hvapi_negotiated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct pci_pbm_info *pbm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct device_node *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct atu *atu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) u32 devhandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) int i, err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static bool hv_atu = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) dp = op->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (!hvapi_negotiated++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) vpci_major = vpci_versions[i].major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) vpci_minor = vpci_versions[i].minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) &vpci_minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) pr_err(PFX "Could not register hvapi, err=%d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) vpci_major, vpci_minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* Don't return an error if we fail to register the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * ATU group; ATU hcalls simply won't be available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) hv_atu = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) vatu_major, vatu_minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) dma_ops = &sun4v_dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) regs = of_get_property(dp, "reg", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (!regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) printk(KERN_ERR PFX "Could not find config registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (!iommu_batch_initialized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) unsigned long page = get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) per_cpu(iommu_batch, i).pglist = (u64 *) page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) iommu_batch_initialized = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (!pbm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (!iommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) goto out_free_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) pbm->iommu = iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) iommu->atu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (hv_atu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) atu = kzalloc(sizeof(*atu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (!atu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) pr_err(PFX "Could not allocate atu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) iommu->atu = atu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) err = pci_sun4v_pbm_init(pbm, op, devhandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) goto out_free_iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) dev_set_drvdata(&op->dev, pbm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) out_free_iommu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) kfree(iommu->atu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) kfree(pbm->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) out_free_controller:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) kfree(pbm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) static const struct of_device_id pci_sun4v_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) .name = "pci",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .compatible = "SUNW,sun4v-pci",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static struct platform_driver pci_sun4v_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) .of_match_table = pci_sun4v_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) .probe = pci_sun4v_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static int __init pci_sun4v_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return platform_driver_register(&pci_sun4v_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) subsys_initcall(pci_sun4v_init);