Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2014 NVIDIA CORPORATION.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <soc/tegra/fuse.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include "mc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
/*
 * OF match table: maps each supported Tegra SoC generation's compatible
 * string to its SoC-specific memory-controller description. Entries are
 * compiled in only when the corresponding SoC support is enabled.
 */
static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	{ .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	{ .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
	{ .compatible = "nvidia,tegra114-mc", .data = &tegra114_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_124_SOC
	{ .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_132_SOC
	{ .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_210_SOC
	{ .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
#endif
	{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) static int tegra_mc_block_dma_common(struct tegra_mc *mc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 				     const struct tegra_mc_reset *rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	spin_lock_irqsave(&mc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	value = mc_readl(mc, rst->control) | BIT(rst->bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	mc_writel(mc, value, rst->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	spin_unlock_irqrestore(&mc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) static bool tegra_mc_dma_idling_common(struct tegra_mc *mc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 				       const struct tegra_mc_reset *rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) static int tegra_mc_unblock_dma_common(struct tegra_mc *mc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 				       const struct tegra_mc_reset *rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	spin_lock_irqsave(&mc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	value = mc_readl(mc, rst->control) & ~BIT(rst->bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	mc_writel(mc, value, rst->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	spin_unlock_irqrestore(&mc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) static int tegra_mc_reset_status_common(struct tegra_mc *mc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 					const struct tegra_mc_reset *rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 
/*
 * Default reset-op implementations shared by SoC generations whose MC
 * exposes the common control/status register layout used above.
 */
const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = {
	.block_dma = tegra_mc_block_dma_common,
	.dma_idling = tegra_mc_dma_idling_common,
	.unblock_dma = tegra_mc_unblock_dma_common,
	.reset_status = tegra_mc_reset_status_common,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
/* Recover the tegra_mc that embeds the given reset controller. */
static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev)
{
	return container_of(rcdev, struct tegra_mc, reset);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) static const struct tegra_mc_reset *tegra_mc_reset_find(struct tegra_mc *mc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 							unsigned long id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	for (i = 0; i < mc->soc->num_resets; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		if (mc->soc->resets[i].id == id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 			return &mc->soc->resets[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
/*
 * Assert a memory-client hot reset. The sequence order matters:
 * first stop new DMA requests, then wait for outstanding requests to
 * drain, and only then assert the SoC-specific hot reset. Each phase is
 * optional, driven by which ops the SoC provides.
 *
 * Returns 0 on success, -ENODEV if the reset id or ops are unknown,
 * -EBUSY if the client's DMA fails to drain, or the op's error code.
 */
static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
				    unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;
	int retries = 500;	/* ~5-50 ms total at 10-100 us per poll */
	int err;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	if (rst_ops->block_dma) {
		/* block clients DMA requests */
		err = rst_ops->block_dma(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to block %s DMA: %d\n",
				rst->name, err);
			return err;
		}
	}

	if (rst_ops->dma_idling) {
		/* wait for completion of the outstanding DMA requests */
		while (!rst_ops->dma_idling(mc, rst)) {
			if (!retries--) {
				dev_err(mc->dev, "failed to flush %s DMA\n",
					rst->name);
				return -EBUSY;
			}

			usleep_range(10, 100);
		}
	}

	if (rst_ops->hotreset_assert) {
		/* clear clients DMA requests sitting before arbitration */
		err = rst_ops->hotreset_assert(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to hot reset %s: %d\n",
				rst->name, err);
			return err;
		}
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
/*
 * Deassert a memory-client hot reset: the mirror image of assert, in
 * reverse order — first release the hot reset, then let new DMA
 * requests through to arbitration again.
 *
 * Returns 0 on success, -ENODEV if the reset id or ops are unknown,
 * or the failing op's error code.
 */
static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev,
				      unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;
	int err;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	if (rst_ops->hotreset_deassert) {
		/* take out client from hot reset */
		err = rst_ops->hotreset_deassert(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to deassert hot reset %s: %d\n",
				rst->name, err);
			return err;
		}
	}

	if (rst_ops->unblock_dma) {
		/* allow new DMA requests to proceed to arbitration */
		err = rst_ops->unblock_dma(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to unblock %s DMA : %d\n",
				rst->name, err);
			return err;
		}
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static int tegra_mc_hotreset_status(struct reset_controller_dev *rcdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 				    unsigned long id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	struct tegra_mc *mc = reset_to_mc(rcdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	const struct tegra_mc_reset_ops *rst_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	const struct tegra_mc_reset *rst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	rst = tegra_mc_reset_find(mc, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	if (!rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	rst_ops = mc->soc->reset_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	if (!rst_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	return rst_ops->reset_status(mc, rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 
/* Hooks exposed to the generic reset-controller framework. */
static const struct reset_control_ops tegra_mc_reset_ops = {
	.assert = tegra_mc_hotreset_assert,
	.deassert = tegra_mc_hotreset_deassert,
	.status = tegra_mc_hotreset_status,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) static int tegra_mc_reset_setup(struct tegra_mc *mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	mc->reset.ops = &tegra_mc_reset_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	mc->reset.owner = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	mc->reset.of_node = mc->dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	mc->reset.of_reset_n_cells = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	mc->reset.nr_resets = mc->soc->num_resets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	err = reset_controller_register(&mc->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	unsigned long long tick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	/* compute the number of MC clock cycles per tick */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	do_div(tick, NSEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	value = mc_readl(mc, MC_EMEM_ARB_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 	value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	mc_writel(mc, value, MC_EMEM_ARB_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	/* write latency allowance defaults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	for (i = 0; i < mc->soc->num_clients; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 		const struct tegra_mc_la *la = &mc->soc->clients[i].la;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 		u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 		value = mc_readl(mc, la->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 		value &= ~(la->mask << la->shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 		value |= (la->def & la->mask) << la->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		mc_writel(mc, value, la->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	/* latch new values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	struct tegra_mc_timing *timing = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	for (i = 0; i < mc->num_timings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 		if (mc->timings[i].rate == rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 			timing = &mc->timings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	if (!timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 		dev_err(mc->dev, "no memory timing registered for rate %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 			rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	for (i = 0; i < mc->soc->num_emem_regs; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 		mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	u8 dram_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	dram_count = mc_readl(mc, MC_EMEM_ADR_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	dram_count &= MC_EMEM_ADR_CFG_EMEM_NUMDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	dram_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	return dram_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) static int load_one_timing(struct tegra_mc *mc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 			   struct tegra_mc_timing *timing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 			   struct device_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	err = of_property_read_u32(node, "clock-frequency", &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 		dev_err(mc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 			"timing %pOFn: failed to read rate\n", node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	timing->rate = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	timing->emem_data = devm_kcalloc(mc->dev, mc->soc->num_emem_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 					 sizeof(u32), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	if (!timing->emem_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	err = of_property_read_u32_array(node, "nvidia,emem-configuration",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 					 timing->emem_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 					 mc->soc->num_emem_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 		dev_err(mc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 			"timing %pOFn: failed to read EMEM configuration\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 			node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) static int load_timings(struct tegra_mc *mc, struct device_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	struct device_node *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	struct tegra_mc_timing *timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	int child_count = of_get_child_count(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	int i = 0, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	mc->timings = devm_kcalloc(mc->dev, child_count, sizeof(*timing),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 				   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	if (!mc->timings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	mc->num_timings = child_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 	for_each_child_of_node(node, child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 		timing = &mc->timings[i++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 		err = load_one_timing(mc, timing, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 			of_node_put(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 
/*
 * Locate the DT timings subtree whose "nvidia,ram-code" matches the
 * board's fused RAM code and load its timing entries. Finding no match
 * is only a warning — the driver continues with zero timings.
 */
static int tegra_mc_setup_timings(struct tegra_mc *mc)
{
	struct device_node *node;
	u32 ram_code, node_ram_code;
	int err;

	ram_code = tegra_read_ram_code();

	mc->num_timings = 0;

	for_each_child_of_node(mc->dev->of_node, node) {
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err || (node_ram_code != ram_code))
			continue;

		err = load_timings(mc, node);
		/* drop the iterator's reference before leaving the loop */
		of_node_put(node);
		if (err)
			return err;
		break;
	}

	if (mc->num_timings == 0)
		dev_warn(mc->dev,
			 "no memory timings for RAM code %u registered\n",
			 ram_code);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 
/*
 * Human-readable names indexed by MC_INTSTATUS bit position; unnamed
 * bits stay NULL and are reported as "unknown" by the IRQ handler.
 */
static const char *const status_names[32] = {
	[ 1] = "External interrupt",
	[ 6] = "EMEM address decode error",
	[ 7] = "GART page fault",
	[ 8] = "Security violation",
	[ 9] = "EMEM arbitration error",
	[10] = "Page fault",
	[11] = "Invalid APB ASID update",
	[12] = "VPR violation",
	[13] = "Secure carveout violation",
	[16] = "MTS carveout violation",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
/*
 * Descriptions indexed by the MC_ERR_STATUS type field; gaps stay NULL
 * (the IRQ handler reads this table by the decoded type value).
 */
static const char *const error_names[8] = {
	[2] = "EMEM decode error",
	[3] = "TrustZone violation",
	[4] = "Carveout violation",
	[6] = "SMMU translation error",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) static irqreturn_t tegra_mc_irq(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 	struct tegra_mc *mc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	unsigned long status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	unsigned int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	/* mask all interrupts to avoid flooding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 	status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	for_each_set_bit(bit, &status, 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 		const char *error = status_names[bit] ?: "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 		const char *client = "unknown", *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 		const char *direction, *secure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 		phys_addr_t addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 		unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 		char perm[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 		u8 id, type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 		u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 		value = mc_readl(mc, MC_ERR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) #ifdef CONFIG_PHYS_ADDR_T_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 		if (mc->soc->num_address_bits > 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 			addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 				MC_ERR_STATUS_ADR_HI_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 			addr <<= 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		if (value & MC_ERR_STATUS_RW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 			direction = "write";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 			direction = "read";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 		if (value & MC_ERR_STATUS_SECURITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 			secure = "secure ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 			secure = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 		id = value & mc->soc->client_id_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 		for (i = 0; i < mc->soc->num_clients; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 			if (mc->soc->clients[i].id == id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 				client = mc->soc->clients[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 		type = (value & MC_ERR_STATUS_TYPE_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 		       MC_ERR_STATUS_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 		desc = error_names[type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 		switch (value & MC_ERR_STATUS_TYPE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 		case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 			perm[0] = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 			perm[1] = '[';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 			if (value & MC_ERR_STATUS_READABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 				perm[2] = 'R';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 				perm[2] = '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 			if (value & MC_ERR_STATUS_WRITABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 				perm[3] = 'W';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 				perm[3] = '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 			if (value & MC_ERR_STATUS_NONSECURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 				perm[4] = '-';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 				perm[4] = 'S';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 			perm[5] = ']';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 			perm[6] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 			perm[0] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 		value = mc_readl(mc, MC_ERR_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		addr |= value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 		dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 				    client, secure, direction, &addr, error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 				    desc, perm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	/* clear interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	mc_writel(mc, status, MC_INTSTATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	struct tegra_mc *mc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	unsigned long status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	unsigned int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	/* mask all interrupts to avoid flooding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	for_each_set_bit(bit, &status, 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 		const char *direction = "read", *secure = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 		const char *error = status_names[bit];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 		const char *client, *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 		phys_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 		u32 value, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 		u8 id, type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		switch (BIT(bit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 		case MC_INT_DECERR_EMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 			reg = MC_DECERR_EMEM_OTHERS_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 			value = mc_readl(mc, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 			id = value & mc->soc->client_id_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 			desc = error_names[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 			if (value & BIT(31))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 				direction = "write";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 		case MC_INT_INVALID_GART_PAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 			reg = MC_GART_ERROR_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 			value = mc_readl(mc, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 			id = (value >> 1) & mc->soc->client_id_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 			desc = error_names[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 			if (value & BIT(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 				direction = "write";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 		case MC_INT_SECURITY_VIOLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 			reg = MC_SECURITY_VIOLATION_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 			value = mc_readl(mc, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 			id = value & mc->soc->client_id_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 			type = (value & BIT(30)) ? 4 : 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 			desc = error_names[type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 			secure = "secure ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 			if (value & BIT(31))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 				direction = "write";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 		client = mc->soc->clients[id].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		addr = mc_readl(mc, reg + sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 		dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 				    client, secure, direction, &addr, error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 				    desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	/* clear interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	mc_writel(mc, status, MC_INTSTATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static int tegra_mc_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	struct tegra_mc *mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	void *isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	if (!mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	platform_set_drvdata(pdev, mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	spin_lock_init(&mc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	mc->soc = of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	mc->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	mask = DMA_BIT_MASK(mc->soc->num_address_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	err = dma_coerce_mask_and_coherent(&pdev->dev, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	/* length of MC tick in nanoseconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	mc->tick = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	mc->regs = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	if (IS_ERR(mc->regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 		return PTR_ERR(mc->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	mc->clk = devm_clk_get(&pdev->dev, "mc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	if (IS_ERR(mc->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 		dev_err(&pdev->dev, "failed to get MC clock: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 			PTR_ERR(mc->clk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 		return PTR_ERR(mc->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) #ifdef CONFIG_ARCH_TEGRA_2x_SOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	if (mc->soc == &tegra20_mc_soc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 		isr = tegra20_mc_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 		/* ensure that debug features are disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 		mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 		err = tegra_mc_setup_latency_allowance(mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 				"failed to setup latency allowance: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 				err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		isr = tegra_mc_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		err = tegra_mc_setup_timings(mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 			dev_err(&pdev->dev, "failed to setup timings: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 				err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	mc->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	if (mc->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 		dev_err(&pdev->dev, "interrupt not specified\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 		return mc->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	mc_writel(mc, mc->soc->intmask, MC_INTMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 	err = devm_request_irq(&pdev->dev, mc->irq, isr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 			       dev_name(&pdev->dev), mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 			err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 	err = tegra_mc_reset_setup(mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 		dev_err(&pdev->dev, "failed to register reset controller: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 			err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 		mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 		if (IS_ERR(mc->smmu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 			dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 				PTR_ERR(mc->smmu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 			mc->smmu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 		mc->gart = tegra_gart_probe(&pdev->dev, mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 		if (IS_ERR(mc->gart)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 			dev_err(&pdev->dev, "failed to probe GART: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 				PTR_ERR(mc->gart));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 			mc->gart = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) static int tegra_mc_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	struct tegra_mc *mc = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 		err = tegra_gart_suspend(mc->gart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) static int tegra_mc_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	struct tegra_mc *mc = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 		err = tegra_gart_resume(mc->gart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static const struct dev_pm_ops tegra_mc_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	.suspend = tegra_mc_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	.resume = tegra_mc_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) static struct platform_driver tegra_mc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 		.name = "tegra-mc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 		.of_match_table = tegra_mc_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 		.pm = &tegra_mc_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 		.suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	.prevent_deferred_probe = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	.probe = tegra_mc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static int tegra_mc_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	return platform_driver_register(&tegra_mc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) arch_initcall(tegra_mc_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) MODULE_DESCRIPTION("NVIDIA Tegra Memory Controller driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) MODULE_LICENSE("GPL v2");