// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"

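/*
 * Register accessors. On SoCs with virtualization support (those whose
 * info->has_hypervisor is set), some registers live in a separate
 * hypervisor aperture and are accessed through the *_hypervisor_* helpers.
 */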
void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

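/*
 * The sync register space sits at a per-generation offset
 * (info->sync_offset) from the base register aperture.
 */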
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

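/* Channel registers are accessed relative to each channel's own base. */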
void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}

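/*
 * Per-generation hardware description: the number of channels, syncpoints,
 * mlocks and wait bases, the sync register offset, the addressable DMA
 * range, and whether the wide GATHER opcode and the hypervisor register
 * aperture are available.
 */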
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

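/*
 * Stream ID register map: each entry names a register block within the
 * hypervisor aperture (base) and the two values programmed into it
 * (offset and limit); see host1x_setup_sid_table().
 */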
static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
};

static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);

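/*
 * Program the stream ID table through the hypervisor aperture: each
 * entry's offset is written at its base register, followed by its limit
 * in the next word.
 */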
static void host1x_setup_sid_table(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}
}

static bool host1x_wants_iommu(struct host1x *host1x)
{
	/*
	 * If we support addressing a maximum of 32 bits of physical memory
	 * and if the host1x firewall is enabled, there's no need to enable
	 * IOMMU support. This can happen for example on Tegra20, Tegra30
	 * and Tegra114.
	 *
	 * Tegra124 and later can address up to 34 bits of physical memory and
	 * many platforms come equipped with more than 2 GiB of system memory,
	 * which requires crossing the 4 GiB boundary. But there's a catch: on
	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
	 * only address up to 32 bits of memory in GATHER opcodes, which means
	 * that command buffers need to either be in the first 2 GiB of system
	 * memory (which could quickly lead to memory exhaustion), or command
	 * buffers need to be treated differently from other buffers (which is
	 * not possible with the current ABI).
	 *
	 * A third option is to use the IOMMU in these cases to make sure all
	 * buffers will be mapped into a 32-bit IOVA space that host1x can
	 * address. This allows all of the system memory to be used and works
	 * within the limitations of the host1x on these SoCs.
	 *
	 * In summary, default to enable IOMMU on Tegra124 and later. For any
	 * of the earlier SoCs, only use the IOMMU for additional safety when
	 * the host1x firewall is disabled.
	 */
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}

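/*
 * Attach host1x to an IOMMU domain if one is wanted and none is in place
 * yet. Returns the domain in use (possibly one already set up via the DMA
 * API), NULL if host1x ends up without IOMMU translation, or an ERR_PTR()
 * on failure.
 */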
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (host->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
			to_dma_iommu_mapping(host->dev);
		arm_iommu_detach_device(host->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(host->dev);
	}
#endif

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}

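/*
 * Set up IOMMU translation for host1x and derive the DMA mask to apply to
 * the device from the outcome.
 */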
static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}

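/* Tear down the IOMMU domain set up by host1x_iommu_attach(). */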
static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}

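/*
 * Bring up a host1x instance: map the register apertures, set up IOMMU
 * translation, clock and reset, initialize syncpoints and interrupts, then
 * register the host1x bus and populate the child devices.
 */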
static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	struct resource *regs, *hv_regs = NULL;
	int syncpt_irq;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
		if (!regs) {
			dev_err(&pdev->dev, "failed to get vm registers\n");
			return -ENXIO;
		}

		hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						       "hypervisor");
		if (!hv_regs) {
			dev_err(&pdev->dev,
				"failed to get hypervisor registers\n");
			return -ENXIO;
		}
	} else {
		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!regs) {
			dev_err(&pdev->dev, "failed to get registers\n");
			return -ENXIO;
		}
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0)
		return syncpt_irq;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	if (host->info->has_hypervisor) {
		host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);
	}

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		return err;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto free_channels;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto deinit_syncpt;
	}

	host1x_debug_init(host);

	if (host->info->has_hypervisor)
		host1x_setup_sid_table(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;

unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);
	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
reset_assert:
	reset_control_assert(host->rst);
unprepare_disable:
	clk_disable_unprepare(host->clk);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);

	return err;
}

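/* Tear a host1x instance down in the reverse order of host1x_probe(). */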
static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);
	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	reset_control_assert(host->rst);
	clk_disable_unprepare(host->clk);
	host1x_channel_list_free(&host->channel_list);
	host1x_iommu_exit(host);

	return 0;
}

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};

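/*
 * The host1x bus type must be registered before the platform drivers,
 * since host1x_probe() registers devices on that bus.
 */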
static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");