// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

/*
 * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
 * for communicating single bit state information to remote processors.
 *
 * The implementation is based on two sections of shared memory; the first
 * holding the state bits and the second holding a matrix of subscription bits.
 *
 * The state bits are structured in entries of 32 bits, each belonging to one
 * system in the SoC. The entry belonging to the local system is considered
 * read-write, while the rest should be considered read-only.
 *
 * The subscription matrix consists of N bitmaps per entry, denoting interest
 * in updates of the entry for each of the N hosts. Upon updating a state bit,
 * each host's subscription bitmap should be queried and the remote system
 * should be interrupted if it has requested so.
 *
 * The subscription matrix is laid out in entry-major order:
 *	entry0: [host0 ... hostN]
 *	.
 *	.
 *	entryM: [host0 ... hostN]
 *
 * A third, optional, shared memory region might contain information regarding
 * the number of entries in the state bitmap as well as the number of columns
 * in the subscription matrix.
 */
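
/*
 * Worked example of the layout above (an editorial sketch derived from the
 * index arithmetic used in this driver, not an additional SMEM definition):
 * with the default geometry of 8 entries and 3 hosts, the state item is an
 * array of 8 u32 words, states[0..7], and the subscription item is an array
 * of 8 * 3 = 24 u32 words. The subscription word recording whether host j
 * wants a kick for changes to entry i is:
 *
 *	intr_mask[i * num_hosts + j]
 *
 * which matches the "entry->subscription = intr_mask + id * smsm->num_hosts"
 * and "entry->subscription + smsm->local_host" expressions further down.
 */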

/*
 * Shared memory identifiers, used to acquire handles to the respective memory
 * regions.
 */
#define SMEM_SMSM_SHARED_STATE		85
#define SMEM_SMSM_CPU_INTR_MASK		333
#define SMEM_SMSM_SIZE_INFO		419

/*
 * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
 */
#define SMSM_DEFAULT_NUM_ENTRIES	8
#define SMSM_DEFAULT_NUM_HOSTS		3

struct smsm_entry;
struct smsm_host;

/**
 * struct qcom_smsm - smsm driver context
 * @dev: smsm device pointer
 * @local_host: column in the subscription matrix representing this system
 * @num_hosts: number of columns in the subscription matrix
 * @num_entries: number of entries in the state map and rows in the subscription
 *		matrix
 * @local_state: pointer to the local processor's state bits
 * @subscription: pointer to local processor's row in subscription matrix
 * @state: smem state handle
 * @lock: spinlock for read-modify-write of the outgoing state
 * @entries: context for each of the entries
 * @hosts: context for each of the hosts
 */
struct qcom_smsm {
	struct device *dev;

	u32 local_host;

	u32 num_hosts;
	u32 num_entries;

	u32 *local_state;
	u32 *subscription;
	struct qcom_smem_state *state;

	spinlock_t lock;

	struct smsm_entry *entries;
	struct smsm_host *hosts;
};

/**
 * struct smsm_entry - per remote processor entry context
 * @smsm: back-reference to driver context
 * @domain: IRQ domain for this entry, if representing a remote system
 * @irq_enabled: bitmap of which state bits IRQs are enabled
 * @irq_rising: bitmap tracking if rising bits should be propagated
 * @irq_falling: bitmap tracking if falling bits should be propagated
 * @last_value: snapshot of state bits last time the interrupts were propagated
 * @remote_state: pointer to this entry's state bits
 * @subscription: pointer to a row in the subscription matrix representing this
 *		entry
 */
struct smsm_entry {
	struct qcom_smsm *smsm;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);
	unsigned long last_value;

	u32 *remote_state;
	u32 *subscription;
};

/**
 * struct smsm_host - representation of a remote host
 * @ipc_regmap: regmap for outgoing interrupt
 * @ipc_offset: offset in @ipc_regmap for outgoing interrupt
 * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt
 */
struct smsm_host {
	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;
};

/**
 * smsm_update_bits() - change bits in outgoing entry and inform subscribers
 * @data: smsm context pointer
 * @mask: mask of bits to change
 * @value: new value
 *
 * Used to set and clear the bits in the outgoing/local entry and inform
 * subscribers about the change.
 */
static int smsm_update_bits(void *data, u32 mask, u32 value)
{
	struct qcom_smsm *smsm = data;
	struct smsm_host *hostp;
	unsigned long flags;
	u32 changes;
	u32 host;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&smsm->lock, flags);

	/* Update the entry */
	val = orig = readl(smsm->local_state);
	val &= ~mask;
	val |= value;

	/* Don't signal if we didn't change the value */
	changes = val ^ orig;
	if (!changes) {
		spin_unlock_irqrestore(&smsm->lock, flags);
		goto done;
	}

	/* Write out the new value */
	writel(val, smsm->local_state);
	spin_unlock_irqrestore(&smsm->lock, flags);

	/* Make sure the value update is ordered before any kicks */
	wmb();

	/* Iterate over all hosts to check who wants a kick */
	for (host = 0; host < smsm->num_hosts; host++) {
		hostp = &smsm->hosts[host];

		val = readl(smsm->subscription + host);
		if (val & changes && hostp->ipc_regmap) {
			regmap_write(hostp->ipc_regmap,
				     hostp->ipc_offset,
				     BIT(hostp->ipc_bit));
		}
	}

done:
	return 0;
}

static const struct qcom_smem_state_ops smsm_state_ops = {
	.update_bits = smsm_update_bits,
};
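
/*
 * Illustrative consumer usage (an editorial sketch, not part of this driver):
 * client drivers do not call smsm_update_bits() directly, they go through the
 * generic smem_state API, which dispatches to the ops registered above. The
 * "wlan" con_id below is a hypothetical example name.
 *
 *	struct qcom_smem_state *state;
 *	unsigned int bit;
 *
 *	state = qcom_smem_state_get(dev, "wlan", &bit);
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 *
 *	qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));  (assert)
 *	qcom_smem_state_update_bits(state, BIT(bit), 0);         (clear)
 *	qcom_smem_state_put(state);
 */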

/**
 * smsm_intr() - cascading IRQ handler for SMSM
 * @irq: unused
 * @data: entry related to this IRQ
 *
 * This function cascades an incoming interrupt from a remote system, based on
 * the state bits and configuration.
 */
static irqreturn_t smsm_intr(int irq, void *data)
{
	struct smsm_entry *entry = data;
	unsigned i;
	int irq_pin;
	u32 changed;
	u32 val;

	val = readl(entry->remote_state);
	changed = val ^ xchg(&entry->last_value, val);

	for_each_set_bit(i, entry->irq_enabled, 32) {
		if (!(changed & BIT(i)))
			continue;

		if (val & BIT(i)) {
			if (test_bit(i, entry->irq_rising)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		} else {
			if (test_bit(i, entry->irq_falling)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}

	return IRQ_HANDLED;
}

/**
 * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain status bit
 * @irqd: IRQ handle to be masked
 *
 * This un-subscribes the local CPU from interrupts upon changes to the defined
 * status bit. The bit is also cleared from cascading.
 */
static void smsm_mask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val &= ~BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}

	clear_bit(irq, entry->irq_enabled);
}

/**
 * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
 * @irqd: IRQ handle to be unmasked
 *
 * This subscribes the local CPU to interrupts upon changes to the defined
 * status bit. The bit is also marked for cascading.
 */
static void smsm_unmask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	/* Make sure our last cached state is up-to-date */
	if (readl(entry->remote_state) & BIT(irq))
		set_bit(irq, &entry->last_value);
	else
		clear_bit(irq, &entry->last_value);

	set_bit(irq, entry->irq_enabled);

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val |= BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}
}

/**
 * smsm_set_irq_type() - updates the requested IRQ type for the cascaded IRQ
 * @irqd: consumer interrupt handle
 * @type: requested flags
 */
static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static struct irq_chip smsm_irq_chip = {
	.name = "smsm",
	.irq_mask = smsm_mask_irq,
	.irq_unmask = smsm_unmask_irq,
	.irq_set_type = smsm_set_irq_type,
};

/**
 * smsm_irq_map() - sets up a mapping for a cascaded IRQ
 * @d: IRQ domain representing an entry
 * @irq: IRQ to set up
 * @hw: unused
 */
static int smsm_irq_map(struct irq_domain *d,
			unsigned int irq,
			irq_hw_number_t hw)
{
	struct smsm_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);

	return 0;
}

static const struct irq_domain_ops smsm_irq_ops = {
	.map = smsm_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

/**
 * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
 * @smsm: smsm driver context
 * @host_id: index of the remote host to be resolved
 *
 * Parses the device tree to acquire the information needed for sending the
 * outgoing interrupts to a remote host - identified by @host_id.
 */
static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
{
	struct device_node *syscon;
	struct device_node *node = smsm->dev->of_node;
	struct smsm_host *host = &smsm->hosts[host_id];
	char key[16];
	int ret;

	snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
	syscon = of_parse_phandle(node, key, 0);
	if (!syscon)
		return 0;

	host->ipc_regmap = syscon_node_to_regmap(syscon);
	/* Drop the reference taken by of_parse_phandle() */
	of_node_put(syscon);
	if (IS_ERR(host->ipc_regmap))
		return PTR_ERR(host->ipc_regmap);

	ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
	if (ret < 0) {
		dev_err(smsm->dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
	if (ret < 0) {
		dev_err(smsm->dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}
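
/*
 * Illustrative device tree fragment (a hedged example of the property format
 * parsed above; the phandle target and numbers are hypothetical):
 *
 *	qcom,ipc-1 = <&apcs 8 13>;
 *
 * With such a property, hosts[1].ipc_regmap would resolve to the &apcs syscon,
 * hosts[1].ipc_offset to 8 and hosts[1].ipc_bit to 13.
 */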

/**
 * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
 * @smsm: smsm driver context
 * @entry: entry context to be set up
 * @node: dt node containing the entry's properties
 */
static int smsm_inbound_entry(struct qcom_smsm *smsm,
			      struct smsm_entry *entry,
			      struct device_node *node)
{
	int ret;
	int irq;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		dev_err(smsm->dev, "failed to parse smsm interrupt\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(smsm->dev, irq,
					NULL, smsm_intr,
					IRQF_ONESHOT,
					"smsm", (void *)entry);
	if (ret) {
		dev_err(smsm->dev, "failed to request interrupt\n");
		return ret;
	}

	entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smsm->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}
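
/*
 * Illustrative child node (a hedged sketch of the expected binding layout;
 * node name, interrupt specifier and entry number are hypothetical):
 *
 *	modem-state@1 {
 *		reg = <1>;
 *		interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *	};
 *
 * "reg" selects the entry index used in qcom_smsm_probe() below, "interrupts"
 * supplies the inbound IRQ requested in smsm_inbound_entry(), and the two
 * interrupt cells match irq_domain_xlate_twocell used by smsm_irq_ops.
 */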

/**
 * smsm_get_size_info() - parse the optional memory segment for sizes
 * @smsm: smsm driver context
 *
 * Attempt to acquire the number of hosts and entries from the optional shared
 * memory location. Not being able to find this segment should indicate that
 * we're on an older system where these values were hard-coded to
 * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int smsm_get_size_info(struct qcom_smsm *smsm)
{
	size_t size;
	struct {
		u32 num_hosts;
		u32 num_entries;
		u32 reserved0;
		u32 reserved1;
	} *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
	if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) {
		if (PTR_ERR(info) != -EPROBE_DEFER)
			dev_err(smsm->dev, "unable to retrieve smsm size info\n");
		return PTR_ERR(info);
	} else if (IS_ERR(info) || size != sizeof(*info)) {
		dev_warn(smsm->dev, "no smsm size info, using defaults\n");
		smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
		smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
		return 0;
	}

	smsm->num_entries = info->num_entries;
	smsm->num_hosts = info->num_hosts;

	dev_dbg(smsm->dev,
		"found custom size of smsm: %d entries %d hosts\n",
		smsm->num_entries, smsm->num_hosts);

	return 0;
}

static int qcom_smsm_probe(struct platform_device *pdev)
{
	struct device_node *local_node;
	struct device_node *node;
	struct smsm_entry *entry;
	struct qcom_smsm *smsm;
	u32 *intr_mask;
	size_t size;
	u32 *states;
	u32 id;
	int ret;

	smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
	if (!smsm)
		return -ENOMEM;
	smsm->dev = &pdev->dev;
	spin_lock_init(&smsm->lock);

	ret = smsm_get_size_info(smsm);
	if (ret)
		return ret;

	smsm->entries = devm_kcalloc(&pdev->dev,
				     smsm->num_entries,
				     sizeof(struct smsm_entry),
				     GFP_KERNEL);
	if (!smsm->entries)
		return -ENOMEM;

	smsm->hosts = devm_kcalloc(&pdev->dev,
				   smsm->num_hosts,
				   sizeof(struct smsm_host),
				   GFP_KERNEL);
	if (!smsm->hosts)
		return -ENOMEM;

	for_each_child_of_node(pdev->dev.of_node, local_node) {
		if (of_find_property(local_node, "#qcom,smem-state-cells", NULL))
			break;
	}
	if (!local_node) {
		dev_err(&pdev->dev, "no state entry\n");
		return -EINVAL;
	}

	of_property_read_u32(pdev->dev.of_node,
			     "qcom,local-host",
			     &smsm->local_host);

	/* Parse the host properties */
	for (id = 0; id < smsm->num_hosts; id++) {
		ret = smsm_parse_ipc(smsm, id);
		if (ret < 0)
			return ret;
	}

	/* Acquire the main SMSM state vector */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
			      smsm->num_entries * sizeof(u32));
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate shared state entry\n");
		return ret;
	}

	states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
	if (IS_ERR(states)) {
		dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
		return PTR_ERR(states);
	}

	/* Acquire the list of interrupt mask vectors */
	size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
		return ret;
	}

	intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
	if (IS_ERR(intr_mask)) {
		dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
		return PTR_ERR(intr_mask);
	}

	/* Set up references to the local state bits and local subscription row */
	smsm->local_state = states + smsm->local_host;
	smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;

	/* Register the outgoing state */
	smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
	if (IS_ERR(smsm->state)) {
		dev_err(smsm->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(smsm->state);
	}

	/* Register handlers for remote processor entries of interest. */
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		if (!of_property_read_bool(node, "interrupt-controller"))
			continue;

		ret = of_property_read_u32(node, "reg", &id);
		if (ret || id >= smsm->num_entries) {
			dev_err(&pdev->dev, "invalid reg of entry\n");
			if (!ret)
				ret = -EINVAL;
			goto unwind_interfaces;
		}
		entry = &smsm->entries[id];

		entry->smsm = smsm;
		entry->remote_state = states + id;

		/* Set up subscription pointers and unsubscribe from any kicks */
		entry->subscription = intr_mask + id * smsm->num_hosts;
		writel(0, entry->subscription + smsm->local_host);

		ret = smsm_inbound_entry(smsm, entry, node);
		if (ret < 0)
			goto unwind_interfaces;
	}

	platform_set_drvdata(pdev, smsm);

	return 0;

unwind_interfaces:
	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);

	return ret;
}

static int qcom_smsm_remove(struct platform_device *pdev)
{
	struct qcom_smsm *smsm = platform_get_drvdata(pdev);
	unsigned id;

	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);

	return 0;
}

static const struct of_device_id qcom_smsm_of_match[] = {
	{ .compatible = "qcom,smsm" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);

static struct platform_driver qcom_smsm_driver = {
	.probe = qcom_smsm_probe,
	.remove = qcom_smsm_remove,
	.driver = {
		.name = "qcom-smsm",
		.of_match_table = qcom_smsm_of_match,
	},
};
module_platform_driver(qcom_smsm_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
MODULE_LICENSE("GPL v2");