// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
 *          Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
 */

#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "remoteproc_internal.h"

#define HOLD_BOOT		0
#define RELEASE_BOOT		1

#define MBOX_NB_VQ		2
#define MBOX_NB_MBX		3

#define STM32_SMC_RCC		0x82001000
#define STM32_SMC_REG_WRITE	0x1

#define STM32_MBX_VQ0		"vq0"
#define STM32_MBX_VQ0_ID	0
#define STM32_MBX_VQ1		"vq1"
#define STM32_MBX_VQ1_ID	1
#define STM32_MBX_SHUTDOWN	"shutdown"

#define RSC_TBL_SIZE		1024

#define M4_STATE_OFF		0
#define M4_STATE_INI		1
#define M4_STATE_CRUN		2
#define M4_STATE_CSTOP		3
#define M4_STATE_STANDBY	4
#define M4_STATE_CRASH		5

struct stm32_syscon {
	struct regmap *map;
	u32 reg;
	u32 mask;
};

struct stm32_rproc_mem {
	char name[20];
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};

struct stm32_rproc_mem_ranges {
	u32 dev_addr;
	u32 bus_addr;
	u32 size;
};

struct stm32_mbox {
	const unsigned char name[10];
	struct mbox_chan *chan;
	struct mbox_client client;
	struct work_struct vq_work;
	int vq_id;
};

struct stm32_rproc {
	struct reset_control *rst;
	struct stm32_syscon hold_boot;
	struct stm32_syscon pdds;
	struct stm32_syscon m4_state;
	struct stm32_syscon rsctbl;
	int wdg_irq;
	u32 nb_rmems;
	struct stm32_rproc_mem *rmems;
	struct stm32_mbox mb[MBOX_NB_MBX];
	struct workqueue_struct *workqueue;
	bool secured_soc;
	void __iomem *rsc_va;
};

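/*
 * Translate a host physical address into the Cortex-M4 device address space,
 * using the ranges parsed from the "dma-ranges" property.
 */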
static int stm32_rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da)
{
	unsigned int i;
	struct stm32_rproc *ddata = rproc->priv;
	struct stm32_rproc_mem *p_mem;

	for (i = 0; i < ddata->nb_rmems; i++) {
		p_mem = &ddata->rmems[i];

		if (pa < p_mem->bus_addr ||
		    pa >= p_mem->bus_addr + p_mem->size)
			continue;
		*da = pa - p_mem->bus_addr + p_mem->dev_addr;
		dev_dbg(rproc->dev.parent, "pa %pa to da %llx\n", &pa, *da);
		return 0;
	}

	return -EINVAL;
}

static int stm32_rproc_mem_alloc(struct rproc *rproc,
				 struct rproc_mem_entry *mem)
{
	struct device *dev = rproc->dev.parent;
	void *va;

	dev_dbg(dev, "map memory: %pa+%zx\n", &mem->dma, mem->len);
	va = ioremap_wc(mem->dma, mem->len);
	if (IS_ERR_OR_NULL(va)) {
		dev_err(dev, "Unable to map memory region: %pa+%zx\n",
			&mem->dma, mem->len);
		return -ENOMEM;
	}

	/* Update memory entry va */
	mem->va = va;

	return 0;
}

static int stm32_rproc_mem_release(struct rproc *rproc,
				   struct rproc_mem_entry *mem)
{
	dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
	iounmap(mem->va);

	return 0;
}

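/*
 * Build the address translation table from the "dma-ranges" property of the
 * parent node: each entry provides a device address, a bus address and a size.
 */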
static int stm32_rproc_of_memory_translations(struct platform_device *pdev,
					      struct stm32_rproc *ddata)
{
	struct device *parent, *dev = &pdev->dev;
	struct device_node *np;
	struct stm32_rproc_mem *p_mems;
	struct stm32_rproc_mem_ranges *mem_range;
	int cnt, array_size, i, ret = 0;

	parent = dev->parent;
	np = parent->of_node;

	cnt = of_property_count_elems_of_size(np, "dma-ranges",
					      sizeof(*mem_range));
	if (cnt <= 0) {
		dev_err(dev, "%s: dma-ranges property not defined\n", __func__);
		return -EINVAL;
	}

	p_mems = devm_kcalloc(dev, cnt, sizeof(*p_mems), GFP_KERNEL);
	if (!p_mems)
		return -ENOMEM;
	mem_range = kcalloc(cnt, sizeof(*mem_range), GFP_KERNEL);
	if (!mem_range)
		return -ENOMEM;

	array_size = cnt * sizeof(struct stm32_rproc_mem_ranges) / sizeof(u32);

	ret = of_property_read_u32_array(np, "dma-ranges",
					 (u32 *)mem_range, array_size);
	if (ret) {
		dev_err(dev, "error while getting dma-ranges property: %x\n", ret);
		goto free_mem;
	}

	for (i = 0; i < cnt; i++) {
		p_mems[i].bus_addr = mem_range[i].bus_addr;
		p_mems[i].dev_addr = mem_range[i].dev_addr;
		p_mems[i].size = mem_range[i].size;

		dev_dbg(dev, "memory range[%i]: da %#x, pa %pa, size %#zx:\n",
			i, p_mems[i].dev_addr, &p_mems[i].bus_addr,
			p_mems[i].size);
	}

	ddata->rmems = p_mems;
	ddata->nb_rmems = cnt;

free_mem:
	kfree(mem_range);
	return ret;
}

static int stm32_rproc_mbox_idx(struct rproc *rproc, const unsigned char *name)
{
	struct stm32_rproc *ddata = rproc->priv;
	int i;

	for (i = 0; i < ARRAY_SIZE(ddata->mb); i++) {
		if (!strncmp(ddata->mb[i].name, name, strlen(name)))
			return i;
	}
	dev_err(&rproc->dev, "mailbox %s not found\n", name);

	return -EINVAL;
}

static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc,
					  const struct firmware *fw)
{
	if (rproc_elf_load_rsc_table(rproc, fw))
		dev_warn(&rproc->dev, "no resource table found for this firmware\n");

	return 0;
}

static int stm32_rproc_parse_memory_regions(struct rproc *rproc)
{
	struct device *dev = rproc->dev.parent;
	struct device_node *np = dev->of_node;
	struct of_phandle_iterator it;
	struct rproc_mem_entry *mem;
	struct reserved_mem *rmem;
	u64 da;
	int index = 0;

	/* Register associated reserved memory regions */
	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
	while (of_phandle_iterator_next(&it) == 0) {
		rmem = of_reserved_mem_lookup(it.node);
		if (!rmem) {
			dev_err(dev, "unable to acquire memory-region\n");
			return -EINVAL;
		}

		if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) {
			dev_err(dev, "memory region not valid %pa\n",
				&rmem->base);
			return -EINVAL;
		}

		/* No need to map vdev buffer */
		if (strcmp(it.node->name, "vdev0buffer")) {
			/* Register memory region */
			mem = rproc_mem_entry_init(dev, NULL,
						   (dma_addr_t)rmem->base,
						   rmem->size, da,
						   stm32_rproc_mem_alloc,
						   stm32_rproc_mem_release,
						   it.node->name);

			if (mem)
				rproc_coredump_add_segment(rproc, da,
							   rmem->size);
		} else {
			/* Register reserved memory for vdev buffer alloc */
			mem = rproc_of_resm_mem_entry_init(dev, index,
							   rmem->size,
							   rmem->base,
							   it.node->name);
		}

		if (!mem)
			return -ENOMEM;

		rproc_add_carveout(rproc, mem);
		index++;
	}

	return 0;
}

static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
	int ret = stm32_rproc_parse_memory_regions(rproc);

	if (ret)
		return ret;

	return stm32_rproc_elf_load_rsc_table(rproc, fw);
}

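/* Coprocessor watchdog interrupt: report the crash to the remoteproc core */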
static irqreturn_t stm32_rproc_wdg(int irq, void *data)
{
	struct platform_device *pdev = data;
	struct rproc *rproc = platform_get_drvdata(pdev);

	rproc_report_crash(rproc, RPROC_WATCHDOG);

	return IRQ_HANDLED;
}

static void stm32_rproc_mb_vq_work(struct work_struct *work)
{
	struct stm32_mbox *mb = container_of(work, struct stm32_mbox, vq_work);
	struct rproc *rproc = dev_get_drvdata(mb->client.dev);

	if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
		dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id);
}

static void stm32_rproc_mb_callback(struct mbox_client *cl, void *data)
{
	struct rproc *rproc = dev_get_drvdata(cl->dev);
	struct stm32_mbox *mb = container_of(cl, struct stm32_mbox, client);
	struct stm32_rproc *ddata = rproc->priv;

	queue_work(ddata->workqueue, &mb->vq_work);
}

static void stm32_rproc_free_mbox(struct rproc *rproc)
{
	struct stm32_rproc *ddata = rproc->priv;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ddata->mb); i++) {
		if (ddata->mb[i].chan)
			mbox_free_channel(ddata->mb[i].chan);
		ddata->mb[i].chan = NULL;
	}
}

static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
	{
		.name = STM32_MBX_VQ0,
		.vq_id = STM32_MBX_VQ0_ID,
		.client = {
			.rx_callback = stm32_rproc_mb_callback,
			.tx_block = false,
		},
	},
	{
		.name = STM32_MBX_VQ1,
		.vq_id = STM32_MBX_VQ1_ID,
		.client = {
			.rx_callback = stm32_rproc_mb_callback,
			.tx_block = false,
		},
	},
	{
		.name = STM32_MBX_SHUTDOWN,
		.vq_id = -1,
		.client = {
			.tx_block = true,
			.tx_done = NULL,
			.tx_tout = 500, /* 500 ms time out */
		},
	}
};

static int stm32_rproc_request_mbox(struct rproc *rproc)
{
	struct stm32_rproc *ddata = rproc->priv;
	struct device *dev = &rproc->dev;
	unsigned int i;
	int j;
	const unsigned char *name;
	struct mbox_client *cl;

	/* Initialise mailbox structure table */
	memcpy(ddata->mb, stm32_rproc_mbox, sizeof(stm32_rproc_mbox));

	for (i = 0; i < MBOX_NB_MBX; i++) {
		name = ddata->mb[i].name;

		cl = &ddata->mb[i].client;
		cl->dev = dev->parent;

		ddata->mb[i].chan = mbox_request_channel_byname(cl, name);
		if (IS_ERR(ddata->mb[i].chan)) {
			if (PTR_ERR(ddata->mb[i].chan) == -EPROBE_DEFER)
				goto err_probe;
			dev_warn(dev, "cannot get %s mbox\n", name);
			ddata->mb[i].chan = NULL;
		}
		if (ddata->mb[i].vq_id >= 0) {
			INIT_WORK(&ddata->mb[i].vq_work,
				  stm32_rproc_mb_vq_work);
		}
	}

	return 0;

err_probe:
	for (j = i - 1; j >= 0; j--)
		if (ddata->mb[j].chan)
			mbox_free_channel(ddata->mb[j].chan);
	return -EPROBE_DEFER;
}

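/*
 * Set or release the MCU "hold boot" bit. On a secured SoC the RCC register
 * is only writable by the secure monitor, so the update goes through an SMC
 * call; otherwise it is written directly through the syscon regmap.
 */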
static int stm32_rproc_set_hold_boot(struct rproc *rproc, bool hold)
{
	struct stm32_rproc *ddata = rproc->priv;
	struct stm32_syscon hold_boot = ddata->hold_boot;
	struct arm_smccc_res smc_res;
	int val, err;

	val = hold ? HOLD_BOOT : RELEASE_BOOT;

	if (IS_ENABLED(CONFIG_HAVE_ARM_SMCCC) && ddata->secured_soc) {
		arm_smccc_smc(STM32_SMC_RCC, STM32_SMC_REG_WRITE,
			      hold_boot.reg, val, 0, 0, 0, 0, &smc_res);
		err = smc_res.a0;
	} else {
		err = regmap_update_bits(hold_boot.map, hold_boot.reg,
					 hold_boot.mask, val);
	}

	if (err)
		dev_err(&rproc->dev, "failed to set hold boot\n");

	return err;
}

static void stm32_rproc_add_coredump_trace(struct rproc *rproc)
{
	struct rproc_debug_trace *trace;
	struct rproc_dump_segment *segment;
	bool already_added;

	list_for_each_entry(trace, &rproc->traces, node) {
		already_added = false;

		list_for_each_entry(segment, &rproc->dump_segments, node) {
			if (segment->da == trace->trace_mem.da) {
				already_added = true;
				break;
			}
		}

		if (!already_added)
			rproc_coredump_add_segment(rproc, trace->trace_mem.da,
						   trace->trace_mem.len);
	}
}

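/*
 * Start the Cortex-M4: clear its deep-sleep request when the pdds syscon is
 * available, then toggle the hold boot bit (release then hold again) so the
 * core boots now but stays held on the next reset.
 */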
static int stm32_rproc_start(struct rproc *rproc)
{
	struct stm32_rproc *ddata = rproc->priv;
	int err;

	stm32_rproc_add_coredump_trace(rproc);

	/* clear remote proc Deep Sleep */
	if (ddata->pdds.map) {
		err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg,
					 ddata->pdds.mask, 0);
		if (err) {
			dev_err(&rproc->dev, "failed to clear pdds\n");
			return err;
		}
	}

	err = stm32_rproc_set_hold_boot(rproc, false);
	if (err)
		return err;

	return stm32_rproc_set_hold_boot(rproc, true);
}

static int stm32_rproc_attach(struct rproc *rproc)
{
	stm32_rproc_add_coredump_trace(rproc);

	return stm32_rproc_set_hold_boot(rproc, true);
}

static int stm32_rproc_stop(struct rproc *rproc)
{
	struct stm32_rproc *ddata = rproc->priv;
	int err, dummy_data, idx;

	/* request shutdown of the remote processor */
	if (rproc->state != RPROC_OFFLINE) {
		idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN);
		if (idx >= 0 && ddata->mb[idx].chan) {
			/* a dummy message is sent to allow blocking on transmit */
			err = mbox_send_message(ddata->mb[idx].chan,
						&dummy_data);
			if (err < 0)
				dev_warn(&rproc->dev, "warning: remote FW shutdown without ack\n");
		}
	}

	err = stm32_rproc_set_hold_boot(rproc, true);
	if (err)
		return err;

	err = reset_control_assert(ddata->rst);
	if (err) {
		dev_err(&rproc->dev, "failed to assert the reset\n");
		return err;
	}

	/* to allow platform Standby power mode, set remote proc Deep Sleep */
	if (ddata->pdds.map) {
		err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg,
					 ddata->pdds.mask, 1);
		if (err) {
			dev_err(&rproc->dev, "failed to set pdds\n");
			return err;
		}
	}

	/* update coprocessor state to OFF if available */
	if (ddata->m4_state.map) {
		err = regmap_update_bits(ddata->m4_state.map,
					 ddata->m4_state.reg,
					 ddata->m4_state.mask,
					 M4_STATE_OFF);
		if (err) {
			dev_err(&rproc->dev, "failed to set copro state\n");
			return err;
		}
	}

	return 0;
}

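/* Notify the coprocessor by signalling the mailbox channel matching @vqid */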
static void stm32_rproc_kick(struct rproc *rproc, int vqid)
{
	struct stm32_rproc *ddata = rproc->priv;
	unsigned int i;
	int err;

	if (WARN_ON(vqid >= MBOX_NB_VQ))
		return;

	for (i = 0; i < MBOX_NB_MBX; i++) {
		if (vqid != ddata->mb[i].vq_id)
			continue;
		if (!ddata->mb[i].chan)
			return;
		err = mbox_send_message(ddata->mb[i].chan, (void *)(long)vqid);
		if (err < 0)
			dev_err(&rproc->dev, "%s: failed (%s, err:%d)\n",
				__func__, ddata->mb[i].name, err);
		return;
	}
}

static struct rproc_ops st_rproc_ops = {
	.start = stm32_rproc_start,
	.stop = stm32_rproc_stop,
	.attach = stm32_rproc_attach,
	.kick = stm32_rproc_kick,
	.load = rproc_elf_load_segments,
	.parse_fw = stm32_rproc_parse_fw,
	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
	.sanity_check = rproc_elf_sanity_check,
	.get_boot_addr = rproc_elf_get_boot_addr,
};

static const struct of_device_id stm32_rproc_match[] = {
	{ .compatible = "st,stm32mp1-m4" },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_rproc_match);

static int stm32_rproc_get_syscon(struct device_node *np, const char *prop,
				  struct stm32_syscon *syscon)
{
	int err = 0;

	syscon->map = syscon_regmap_lookup_by_phandle(np, prop);
	if (IS_ERR(syscon->map)) {
		err = PTR_ERR(syscon->map);
		syscon->map = NULL;
		goto out;
	}

	err = of_property_read_u32_index(np, prop, 1, &syscon->reg);
	if (err)
		goto out;

	err = of_property_read_u32_index(np, prop, 2, &syscon->mask);

out:
	return err;
}

static int stm32_rproc_parse_dt(struct platform_device *pdev,
				struct stm32_rproc *ddata, bool *auto_boot)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct stm32_syscon tz;
	unsigned int tzen;
	int err, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	if (irq > 0) {
		err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0,
				       dev_name(dev), pdev);
		if (err) {
			dev_err(dev, "failed to request wdg irq\n");
			return err;
		}

		ddata->wdg_irq = irq;

		if (of_property_read_bool(np, "wakeup-source")) {
			device_init_wakeup(dev, true);
			dev_pm_set_wake_irq(dev, irq);
		}

		dev_info(dev, "wdg irq registered\n");
	}

	ddata->rst = devm_reset_control_get_by_index(dev, 0);
	if (IS_ERR(ddata->rst)) {
		dev_err(dev, "failed to get mcu reset\n");
		return PTR_ERR(ddata->rst);
	}

	/*
	 * if platform is secured the hold boot bit must be written by
	 * smc call and read normally.
	 * if not secure the hold boot bit could be read/write normally
	 */
	err = stm32_rproc_get_syscon(np, "st,syscfg-tz", &tz);
	if (err) {
		dev_err(dev, "failed to get tz syscfg\n");
		return err;
	}

	err = regmap_read(tz.map, tz.reg, &tzen);
	if (err) {
		dev_err(dev, "failed to read tzen\n");
		return err;
	}
	ddata->secured_soc = tzen & tz.mask;

	err = stm32_rproc_get_syscon(np, "st,syscfg-holdboot",
				     &ddata->hold_boot);
	if (err) {
		dev_err(dev, "failed to get hold boot\n");
		return err;
	}

	err = stm32_rproc_get_syscon(np, "st,syscfg-pdds", &ddata->pdds);
	if (err)
		dev_info(dev, "failed to get pdds\n");

	*auto_boot = of_property_read_bool(np, "st,auto-boot");

	/*
	 * See if we can check the M4 status, i.e if it was started
	 * from the boot loader or not.
	 */
	err = stm32_rproc_get_syscon(np, "st,syscfg-m4-state",
				     &ddata->m4_state);
	if (err) {
		/* remember this */
		ddata->m4_state.map = NULL;
		/* no coprocessor state syscon (optional) */
		dev_warn(dev, "m4 state not supported\n");

		/* no need to go further */
		return 0;
	}

	/* See if we can get the resource table */
	err = stm32_rproc_get_syscon(np, "st,syscfg-rsc-tbl",
				     &ddata->rsctbl);
	if (err) {
		/* no rsc table syscon (optional) */
		dev_warn(dev, "rsc tbl syscon not supported\n");
	}

	return 0;
}

static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
				     unsigned int *state)
{
	/* See stm32_rproc_parse_dt() */
	if (!ddata->m4_state.map) {
		/*
		 * We couldn't get the coprocessor's state, assume
		 * it is not running.
		 */
		*state = M4_STATE_OFF;
		return 0;
	}

	return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state);
}

static int stm32_rproc_da_to_pa(struct platform_device *pdev,
				struct stm32_rproc *ddata,
				u64 da, phys_addr_t *pa)
{
	struct device *dev = &pdev->dev;
	struct stm32_rproc_mem *p_mem;
	unsigned int i;

	for (i = 0; i < ddata->nb_rmems; i++) {
		p_mem = &ddata->rmems[i];

		if (da < p_mem->dev_addr ||
		    da >= p_mem->dev_addr + p_mem->size)
			continue;

		*pa = da - p_mem->dev_addr + p_mem->bus_addr;
		dev_dbg(dev, "da %llx to pa %pa\n", da, pa);

		return 0;
	}

	dev_err(dev, "can't translate da %llx\n", da);

	return -EINVAL;
}

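/*
 * When the firmware was started by the boot loader, read the device address
 * of its resource table from the dedicated syscon register, translate it and
 * map it so the remoteproc core can attach to the running firmware.
 */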
static int stm32_rproc_get_loaded_rsc_table(struct platform_device *pdev,
					    struct rproc *rproc,
					    struct stm32_rproc *ddata)
{
	struct device *dev = &pdev->dev;
	phys_addr_t rsc_pa;
	u32 rsc_da;
	int err;

	err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
	if (err) {
		dev_err(dev, "failed to read rsc tbl addr\n");
		return err;
	}

	if (!rsc_da)
		/* no rsc table */
		return 0;

	err = stm32_rproc_da_to_pa(pdev, ddata, rsc_da, &rsc_pa);
	if (err)
		return err;

	ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
	if (IS_ERR_OR_NULL(ddata->rsc_va)) {
		dev_err(dev, "Unable to map memory region: %pa+%zx\n",
			&rsc_pa, RSC_TBL_SIZE);
		ddata->rsc_va = NULL;
		return -ENOMEM;
	}

	/*
	 * The resource table is already loaded in device memory, no need
	 * to work with a cached table.
	 */
	rproc->cached_table = NULL;
	/* Assuming the resource table fits in 1kB is fair */
	rproc->table_sz = RSC_TBL_SIZE;
	rproc->table_ptr = (struct resource_table *)ddata->rsc_va;

	return 0;
}

static int stm32_rproc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct stm32_rproc *ddata;
	struct device_node *np = dev->of_node;
	struct rproc *rproc;
	unsigned int state;
	int ret;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
	if (!rproc)
		return -ENOMEM;

	ddata = rproc->priv;

	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	ret = stm32_rproc_parse_dt(pdev, ddata, &rproc->auto_boot);
	if (ret)
		goto free_rproc;

	ret = stm32_rproc_of_memory_translations(pdev, ddata);
	if (ret)
		goto free_rproc;

	ret = stm32_rproc_get_m4_status(ddata, &state);
	if (ret)
		goto free_rproc;

	if (state == M4_STATE_CRUN) {
		rproc->state = RPROC_DETACHED;

		ret = stm32_rproc_parse_memory_regions(rproc);
		if (ret)
			goto free_resources;

		ret = stm32_rproc_get_loaded_rsc_table(pdev, rproc, ddata);
		if (ret)
			goto free_resources;
	}

	rproc->has_iommu = false;
	ddata->workqueue = create_workqueue(dev_name(dev));
	if (!ddata->workqueue) {
		dev_err(dev, "cannot create workqueue\n");
		ret = -ENOMEM;
		goto free_resources;
	}

	platform_set_drvdata(pdev, rproc);

	ret = stm32_rproc_request_mbox(rproc);
	if (ret)
		goto free_wkq;

	ret = rproc_add(rproc);
	if (ret)
		goto free_mb;

	return 0;

free_mb:
	stm32_rproc_free_mbox(rproc);
free_wkq:
	destroy_workqueue(ddata->workqueue);
free_resources:
	rproc_resource_cleanup(rproc);
free_rproc:
	if (device_may_wakeup(dev)) {
		dev_pm_clear_wake_irq(dev);
		device_init_wakeup(dev, false);
	}
	rproc_free(rproc);
	return ret;
}

static int stm32_rproc_remove(struct platform_device *pdev)
{
	struct rproc *rproc = platform_get_drvdata(pdev);
	struct stm32_rproc *ddata = rproc->priv;
	struct device *dev = &pdev->dev;

	if (atomic_read(&rproc->power) > 0)
		rproc_shutdown(rproc);

	rproc_del(rproc);
	stm32_rproc_free_mbox(rproc);
	destroy_workqueue(ddata->workqueue);

	if (device_may_wakeup(dev)) {
		dev_pm_clear_wake_irq(dev);
		device_init_wakeup(dev, false);
	}
	rproc_free(rproc);

	return 0;
}

static int __maybe_unused stm32_rproc_suspend(struct device *dev)
{
	struct rproc *rproc = dev_get_drvdata(dev);
	struct stm32_rproc *ddata = rproc->priv;

	if (device_may_wakeup(dev))
		return enable_irq_wake(ddata->wdg_irq);

	return 0;
}

static int __maybe_unused stm32_rproc_resume(struct device *dev)
{
	struct rproc *rproc = dev_get_drvdata(dev);
	struct stm32_rproc *ddata = rproc->priv;

	if (device_may_wakeup(dev))
		return disable_irq_wake(ddata->wdg_irq);

	return 0;
}

static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
			 stm32_rproc_suspend, stm32_rproc_resume);

static struct platform_driver stm32_rproc_driver = {
	.probe = stm32_rproc_probe,
	.remove = stm32_rproc_remove,
	.driver = {
		.name = "stm32-rproc",
		.pm = &stm32_rproc_pm_ops,
		.of_match_table = of_match_ptr(stm32_rproc_match),
	},
};
module_platform_driver(stm32_rproc_driver);

MODULE_DESCRIPTION("STM32 Remote Processor Control Driver");
MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
MODULE_LICENSE("GPL v2");