^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Tegra host1x Job
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2010-2015, NVIDIA Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/host1x.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/iommu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kref.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <trace/events/host1x.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "channel.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "dev.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "job.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "syncpt.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define HOST1X_WAIT_SYNCPT_OFFSET 0x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) u32 num_cmdbufs, u32 num_relocs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) struct host1x_job *job = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) unsigned int num_unpins = num_relocs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) u64 total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) void *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) num_unpins += num_cmdbufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /* Check that we're not going to overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) total = sizeof(struct host1x_job) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) (u64)num_relocs * sizeof(struct host1x_reloc) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) (u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) (u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) (u64)num_unpins * sizeof(dma_addr_t) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) (u64)num_unpins * sizeof(u32 *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) if (total > ULONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) mem = job = kzalloc(total, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) if (!job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) kref_init(&job->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) job->channel = ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /* Redistribute memory to the structs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) mem += sizeof(struct host1x_job);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) job->relocs = num_relocs ? mem : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) mem += num_relocs * sizeof(struct host1x_reloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) job->unpins = num_unpins ? mem : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) mem += num_unpins * sizeof(struct host1x_job_unpin_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) job->gathers = num_cmdbufs ? mem : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) mem += num_cmdbufs * sizeof(struct host1x_job_gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) job->addr_phys = num_unpins ? mem : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) job->reloc_addr_phys = job->addr_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) job->gather_addr_phys = &job->addr_phys[num_relocs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) return job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) EXPORT_SYMBOL(host1x_job_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) struct host1x_job *host1x_job_get(struct host1x_job *job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) kref_get(&job->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) return job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) EXPORT_SYMBOL(host1x_job_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) static void job_free(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct host1x_job *job = container_of(ref, struct host1x_job, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) kfree(job);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) void host1x_job_put(struct host1x_job *job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) kref_put(&job->ref, job_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) EXPORT_SYMBOL(host1x_job_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) unsigned int words, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) struct host1x_job_gather *gather = &job->gathers[job->num_gathers];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) gather->words = words;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) gather->bo = bo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) gather->offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) job->num_gathers++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) EXPORT_SYMBOL(host1x_job_add_gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) struct host1x_client *client = job->client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) struct device *dev = client->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) struct host1x_job_gather *g;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) struct iommu_domain *domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) domain = iommu_get_domain_for_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) job->num_unpins = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) for (i = 0; i < job->num_relocs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) struct host1x_reloc *reloc = &job->relocs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) dma_addr_t phys_addr, *phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) struct sg_table *sgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) reloc->target.bo = host1x_bo_get(reloc->target.bo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) if (!reloc->target.bo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) goto unpin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * If the client device is not attached to an IOMMU, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * physical address of the buffer object can be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) * Similarly, when an IOMMU domain is shared between all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) * host1x clients, the IOVA is already available, so no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * need to map the buffer object again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * XXX Note that this isn't always safe to do because it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) * relies on an assumption that no cache maintenance is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) * needed on the buffer objects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) if (!domain || client->group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) phys = &phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) phys = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) if (IS_ERR(sgt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) err = PTR_ERR(sgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) goto unpin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) if (sgt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) unsigned long mask = HOST1X_RELOC_READ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) HOST1X_RELOC_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) enum dma_data_direction dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) switch (reloc->flags & mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) case HOST1X_RELOC_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) dir = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) case HOST1X_RELOC_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) dir = DMA_FROM_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) dir = DMA_BIDIRECTIONAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) goto unpin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) err = dma_map_sgtable(dev, sgt, dir, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) goto unpin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) job->unpins[job->num_unpins].dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) job->unpins[job->num_unpins].dir = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) phys_addr = sg_dma_address(sgt->sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) job->addr_phys[job->num_unpins] = phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) job->unpins[job->num_unpins].bo = reloc->target.bo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) job->unpins[job->num_unpins].sgt = sgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) job->num_unpins++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) * We will copy gathers BO content later, so there is no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * hold and pin them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) for (i = 0; i < job->num_gathers; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) size_t gather_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) struct sg_table *sgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) dma_addr_t phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) unsigned long shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) struct iova *alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) dma_addr_t *phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) unsigned int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) g = &job->gathers[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) g->bo = host1x_bo_get(g->bo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) if (!g->bo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) goto unpin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * If the host1x is not attached to an IOMMU, there is no need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * to map the buffer object for the host1x, since the physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * address can simply be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) if (!iommu_get_domain_for_dev(host->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) phys = &phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) phys = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) sgt = host1x_bo_pin(host->dev, g->bo, phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) if (IS_ERR(sgt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) err = PTR_ERR(sgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) goto put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) if (host->domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) for_each_sgtable_sg(sgt, sg, j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) gather_size += sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) gather_size = iova_align(&host->iova, gather_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) shift = iova_shift(&host->iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) alloc = alloc_iova(&host->iova, gather_size >> shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) host->iova_end >> shift, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) if (!alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) goto put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) err = iommu_map_sgtable(host->domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) iova_dma_addr(&host->iova, alloc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) sgt, IOMMU_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) __free_iova(&host->iova, alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) goto put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) job->unpins[job->num_unpins].size = gather_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) phys_addr = iova_dma_addr(&host->iova, alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) } else if (sgt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) goto put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) job->unpins[job->num_unpins].dev = host->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) phys_addr = sg_dma_address(sgt->sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) job->addr_phys[job->num_unpins] = phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) job->gather_addr_phys[i] = phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) job->unpins[job->num_unpins].bo = g->bo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) job->unpins[job->num_unpins].sgt = sgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) job->num_unpins++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) host1x_bo_put(g->bo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) unpin:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) host1x_job_unpin(job);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
/*
 * Patch every relocation that targets the gather @g's command buffer,
 * writing the resolved (shifted) DMA address into the command stream.
 *
 * With the firewall enabled the patch is applied to the job's private
 * gather copy (gather_copy_mapped); otherwise the command buffer BO is
 * mmapped on first use and patched in place.
 *
 * Returns 0 on success, -ENOMEM if the command buffer cannot be mapped.
 */
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	void *cmdbuf_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;
	unsigned int i;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		/*
		 * reloc_addr_phys[i] was filled by pin_job() in the same
		 * order as job->relocs[], so index i matches.
		 */
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			/*
			 * Patch the firewall's linear copy: all gathers live
			 * back to back in gather_copy_mapped, with this
			 * gather starting at g->offset bytes.
			 */
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
					g->offset / sizeof(u32);
			goto patch_reloc;
		}

		/* lazily map the command buffer the first time it's needed */
		if (!cmdbuf_addr) {
			cmdbuf_addr = host1x_bo_mmap(cmdbuf);

			if (unlikely(!cmdbuf_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_addr)
		host1x_bo_munmap(cmdbuf, cmdbuf_addr);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) offset *= sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) /* relocation shift value validation isn't implemented yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) if (reloc->shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
/*
 * Parser state for the command-stream firewall (see validate()).
 */
struct host1x_firewall {
	struct host1x_job *job;		/* job being validated */
	struct device *dev;		/* client device, for is_addr_reg() */

	unsigned int num_relocs;	/* relocations not yet consumed */
	struct host1x_reloc *reloc;	/* next relocation to match */

	struct host1x_bo *cmdbuf;	/* command buffer of current gather */
	unsigned int offset;		/* current word offset into gather */

	u32 words;			/* words remaining in current gather */
	u32 class;			/* current engine class */
	u32 reg;			/* register decoded from last opcode */
	u32 mask;			/* write mask decoded from last opcode */
	u32 count;			/* word count decoded from last opcode */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) static int check_register(struct host1x_firewall *fw, unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if (!fw->job->is_addr_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) if (!fw->num_relocs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) fw->num_relocs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) fw->reloc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) static int check_class(struct host1x_firewall *fw, u32 class)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) if (!fw->job->is_valid_class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) if (fw->class != class)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) if (!fw->job->is_valid_class(fw->class))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) static int check_mask(struct host1x_firewall *fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) u32 mask = fw->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) u32 reg = fw->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) while (mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) if (fw->words == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) if (mask & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) ret = check_register(fw, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) fw->words--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) fw->offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) mask >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) reg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) static int check_incr(struct host1x_firewall *fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) u32 count = fw->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) u32 reg = fw->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) if (fw->words == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) ret = check_register(fw, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) reg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) fw->words--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) fw->offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) static int check_nonincr(struct host1x_firewall *fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) u32 count = fw->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (fw->words == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) ret = check_register(fw, fw->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) fw->words--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) fw->offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
/*
 * Walk one gather's command stream in the job's private copy
 * (gather_copy_mapped) and validate every opcode against the firewall
 * rules. Returns 0 when the whole gather is acceptable, -EINVAL on the
 * first unknown opcode or rejected register write.
 */
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	/* this gather starts g->offset bytes into the linear gather copy */
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
			   (g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		/* top nibble of each command word selects the opcode */
		u32 opcode = (word & 0xf0000000) >> 28;

		/* reset decoded fields; consume the opcode word itself */
		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			/* set class, with an optional masked register write */
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1:
			/* incrementing write burst */
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2:
			/* non-incrementing write burst */
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3:
			/* masked write */
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 14:
			/*
			 * Accepted without payload validation — presumably
			 * the immediate/extended opcodes, which carry no
			 * separate data words; confirm against the host1x
			 * opcode documentation.
			 */
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
/*
 * Consolidate all of the job's gather buffers into one contiguous
 * write-combined DMA allocation and run the command-stream firewall
 * over the copy.
 *
 * Copying before validation ensures userspace cannot modify the
 * commands after they have been checked (TOCTOU). On success each
 * gather's base/offset is rewritten to point into the copy, so the
 * hardware only ever executes the validated snapshot.
 *
 * Returns 0 on success, -ENOMEM if the copy cannot be allocated, or
 * -EINVAL if the firewall rejects the stream (or relocs are left over).
 * On the -EINVAL paths the allocation is intentionally kept:
 * job->gather_copy_size is already set, so host1x_job_unpin() will
 * free it.
 */
static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	unsigned int i;

	/* Seed the firewall state; it consumes relocs as it walks the stream. */
	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocs;
	fw.num_relocs = job->num_relocs;
	fw.class = job->class;

	/* Total size of the combined copy of all gathers. */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from the higher-priority pools first,
	 * as waiting for the allocation here is a major performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher priority allocation failed, try the generic-blocking */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	/* Record size now so host1x_job_unpin() frees the copy on any path. */
	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
/*
 * host1x_job_pin() - prepare a job for submission to hardware
 * @job: job to pin
 * @dev: client device submitting the job
 *
 * Pins all buffers referenced by the job so they have stable DMA
 * addresses, optionally snapshots and firewalls the command streams
 * (CONFIG_TEGRA_HOST1X_FIREWALL), and patches gathers and relocations
 * with the resolved addresses.
 *
 * Returns 0 on success or a negative error code; on failure all
 * partially pinned state is released via host1x_job_unpin().
 */
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	/* With the firewall enabled, validate from a CPU-side copy. */
	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets gathers base if firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		/*
		 * Propagate this gather's base to later gathers that share
		 * the same BO and mark them handled so they are patched
		 * only once.
		 */
		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	/* Ensure all patched words are visible before hardware submission. */
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
/*
 * host1x_job_unpin() - release all resources pinned by host1x_job_pin()
 * @job: job to unpin
 *
 * Undoes pin_job()'s work in reverse: tears down per-buffer IOMMU
 * mappings and IOVA reservations, DMA-unmaps and unpins each BO, drops
 * the BO references, and frees the firewall's gather copy if one was
 * allocated. Safe to call on a partially pinned job (num_unpins tracks
 * exactly what succeeded).
 */
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];
		/* BOs pinned without a client device were mapped via host1x */
		struct device *dev = unpin->dev ?: host->dev;
		struct sg_table *sgt = unpin->sgt;

		/*
		 * With the firewall disabled, gathers were mapped through
		 * host1x's own IOMMU domain; unmap and return the IOVA
		 * range. unpin->size is only non-zero for such mappings —
		 * presumably set by pin_job(); not visible here.
		 */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				iova_pfn(&host->iova, job->addr_phys[i]));
		}

		if (unpin->dev && sgt)
			dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);

		host1x_bo_unpin(dev, unpin->bo, sgt);
		host1x_bo_put(unpin->bo);
	}

	/* Make repeated unpin calls (e.g. from the error path) a no-op. */
	job->num_unpins = 0;

	/* Free the firewall's command-stream copy, if copy_gathers() made one. */
	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) * Debug routine used to dump job entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) */
/*
 * Debug routine used to dump job entries
 *
 * Emits the job's syncpoint, fence, FIFO position, timeout and resource
 * counters via dev_dbg(); output is only visible with dynamic debug (or
 * DEBUG) enabled for @dev.
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}