// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
 */

#include <linux/clk.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/slab.h>

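/*
 * Each MU side provides four transmit and four receive data registers.
 * In the status/control registers the per-instance flags are packed into
 * nibbles with instance 0 at the highest bit of its field (e.g. GIP0 is
 * bit 31, GIP3 is bit 28), hence the "3 - (x)" in the macros below. The
 * register offsets themselves differ per SoC and come from imx_mu_dcfg.
 */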
#define IMX_MU_xSR_GIPn(x)	BIT(28 + (3 - (x)))
#define IMX_MU_xSR_RFn(x)	BIT(24 + (3 - (x)))
#define IMX_MU_xSR_TEn(x)	BIT(20 + (3 - (x)))
#define IMX_MU_xSR_BRDIP	BIT(9)

/* General Purpose Interrupt Enable */
#define IMX_MU_xCR_GIEn(x)	BIT(28 + (3 - (x)))
/* Receive Interrupt Enable */
#define IMX_MU_xCR_RIEn(x)	BIT(24 + (3 - (x)))
/* Transmit Interrupt Enable */
#define IMX_MU_xCR_TIEn(x)	BIT(20 + (3 - (x)))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(x)	BIT(16 + (3 - (x)))

#define IMX_MU_CHANS		16
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS	6
#define IMX_MU_CHAN_NAME_SIZE	20

enum imx_mu_chan_type {
	IMX_MU_TYPE_TX,		/* Tx */
	IMX_MU_TYPE_RX,		/* Rx */
	IMX_MU_TYPE_TXDB,	/* Tx doorbell */
	IMX_MU_TYPE_RXDB,	/* Rx doorbell */
};

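/*
 * Largest SCU RPC message handled by this driver: the imx_sc_rpc_msg
 * header word plus seven payload words, i.e. eight u32 words (32 bytes).
 */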
struct imx_sc_rpc_msg_max {
	struct imx_sc_rpc_msg hdr;
	u32 data[7];
};

struct imx_mu_con_priv {
	unsigned int idx;
	char irq_desc[IMX_MU_CHAN_NAME_SIZE];
	enum imx_mu_chan_type type;
	struct mbox_chan *chan;
	struct tasklet_struct txdb_tasklet;
};

struct imx_mu_priv {
	struct device *dev;
	void __iomem *base;
	spinlock_t xcr_lock; /* control register lock */

	struct mbox_controller mbox;
	struct mbox_chan mbox_chans[IMX_MU_CHANS];

	struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
	const struct imx_mu_dcfg *dcfg;
	struct clk *clk;
	int irq;
	bool suspend;

	u32 xcr;

	bool side_b;
};

struct imx_mu_dcfg {
	int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
	int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
	void (*init)(struct imx_mu_priv *priv);
	u32 xTR[4];	/* Transmit Registers */
	u32 xRR[4];	/* Receive Registers */
	u32 xSR;	/* Status Register */
	u32 xCR;	/* Control Register */
};

static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
{
	return container_of(mbox, struct imx_mu_priv, mbox);
}

static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
{
	iowrite32(val, priv->base + offs);
}

static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
{
	return ioread32(priv->base + offs);
}

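/*
 * Read-modify-write of the control register under xcr_lock;
 * returns the value that was written.
 */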
static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&priv->xcr_lock, flags);
	val = imx_mu_read(priv, priv->dcfg->xCR);
	val &= ~clr;
	val |= set;
	imx_mu_write(priv, val, priv->dcfg->xCR);
	spin_unlock_irqrestore(&priv->xcr_lock, flags);

	return val;
}

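/*
 * Generic (non-SCU) transfers carry a single u32 per channel. A TX writes
 * the word into the channel's transmit register and enables the TX-empty
 * interrupt so txdone is signalled from the ISR; a TX doorbell only raises
 * GIRn and reports txdone from a tasklet, since doorbells have no ACK.
 */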
static int imx_mu_generic_tx(struct imx_mu_priv *priv,
			     struct imx_mu_con_priv *cp,
			     void *data)
{
	u32 *arg = data;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
		break;
	case IMX_MU_TYPE_TXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
		tasklet_schedule(&cp->txdb_tasklet);
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}

static int imx_mu_generic_rx(struct imx_mu_priv *priv,
			     struct imx_mu_con_priv *cp)
{
	u32 dat;

	dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
	mbox_chan_received_data(cp->chan, (void *)&dat);

	return 0;
}

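/*
 * SCU RPC transmit: the first word is the header and msg->hdr.size gives
 * the message length in u32 words (header included). The first four words
 * go straight into TR0-TR3; for each remaining word the matching TE bit is
 * polled so a register is only rewritten once the SCU has consumed it.
 */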
static int imx_mu_scu_tx(struct imx_mu_priv *priv,
			 struct imx_mu_con_priv *cp,
			 void *data)
{
	struct imx_sc_rpc_msg_max *msg = data;
	u32 *arg = data;
	int i, ret;
	u32 xsr;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		/*
		 * msg->hdr.size specifies the number of u32 words while
		 * sizeof yields bytes.
		 */

		if (msg->hdr.size > sizeof(*msg) / 4) {
			/*
			 * The real message size can differ from the size of
			 * struct imx_sc_rpc_msg_max.
			 */
			dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2);
			return -EINVAL;
		}

		for (i = 0; i < 4 && i < msg->hdr.size; i++)
			imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
		for (; i < msg->hdr.size; i++) {
			ret = readl_poll_timeout(priv->base + priv->dcfg->xSR,
						 xsr,
						 xsr & IMX_MU_xSR_TEn(i % 4),
						 0, 100);
			if (ret) {
				dev_err(priv->dev, "Send data index: %d timeout\n", i);
				return ret;
			}
			imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
		}

		imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}

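/*
 * SCU RPC receive: read the header from RR0 first (with RIE0 masked so the
 * interrupt does not re-fire), validate the size, then poll RFn for every
 * remaining word before handing the complete message to the client.
 */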
static int imx_mu_scu_rx(struct imx_mu_priv *priv,
			 struct imx_mu_con_priv *cp)
{
	struct imx_sc_rpc_msg_max msg;
	u32 *data = (u32 *)&msg;
	int i, ret;
	u32 xsr;

	imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
	*data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);

	if (msg.hdr.size > sizeof(msg) / 4) {
		dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
		return -EINVAL;
	}

	for (i = 1; i < msg.hdr.size; i++) {
		ret = readl_poll_timeout(priv->base + priv->dcfg->xSR, xsr,
					 xsr & IMX_MU_xSR_RFn(i % 4), 0, 100);
		if (ret) {
			dev_err(priv->dev, "timeout read idx %d\n", i);
			return ret;
		}
		*data++ = imx_mu_read(priv, priv->dcfg->xRR[i % 4]);
	}

	imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(0), 0);
	mbox_chan_received_data(cp->chan, (void *)&msg);

	return 0;
}

static void imx_mu_txdb_tasklet(unsigned long data)
{
	struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;

	mbox_chan_txdone(cp->chan, 0);
}

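/*
 * The MU has a single interrupt line shared by all channels (the IRQ is
 * requested per channel with IRQF_SHARED). Each handler masks the status
 * register against its own pending and enabled bits and returns IRQ_NONE
 * when nothing relevant is asserted, leaving the interrupt to the other
 * registered handlers.
 */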
static irqreturn_t imx_mu_isr(int irq, void *p)
{
	struct mbox_chan *chan = p;
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	u32 val, ctrl;

	ctrl = imx_mu_read(priv, priv->dcfg->xCR);
	val = imx_mu_read(priv, priv->dcfg->xSR);

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		val &= IMX_MU_xSR_TEn(cp->idx) &
		       (ctrl & IMX_MU_xCR_TIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		val &= IMX_MU_xSR_RFn(cp->idx) &
		       (ctrl & IMX_MU_xCR_RIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		val &= IMX_MU_xSR_GIPn(cp->idx) &
		       (ctrl & IMX_MU_xCR_GIEn(cp->idx));
		break;
	default:
		break;
	}

	if (!val)
		return IRQ_NONE;

	if (val == IMX_MU_xSR_TEn(cp->idx)) {
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
		mbox_chan_txdone(chan, 0);
	} else if (val == IMX_MU_xSR_RFn(cp->idx)) {
		priv->dcfg->rx(priv, cp);
	} else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
		imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);
		mbox_chan_received_data(chan, NULL);
	} else {
		dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
		return IRQ_NONE;
	}

	if (priv->suspend)
		pm_system_wakeup();

	return IRQ_HANDLED;
}

static int imx_mu_send_data(struct mbox_chan *chan, void *data)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;

	return priv->dcfg->tx(priv, cp, data);
}

static int imx_mu_startup(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	unsigned long irq_flag = IRQF_SHARED;
	int ret;

	pm_runtime_get_sync(priv->dev);
	if (cp->type == IMX_MU_TYPE_TXDB) {
		/* Tx doorbell channels have no ACK support */
		tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
			     (unsigned long)cp);
		return 0;
	}

	/* The IPC MU should have IRQF_NO_SUSPEND set */
	if (!priv->dev->pm_domain)
		irq_flag |= IRQF_NO_SUSPEND;

	ret = request_irq(priv->irq, imx_mu_isr, irq_flag,
			  cp->irq_desc, chan);
	if (ret) {
		dev_err(priv->dev,
			"Unable to acquire IRQ %d\n", priv->irq);
		return ret;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(cp->idx), 0);
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIEn(cp->idx), 0);
		break;
	default:
		break;
	}

	priv->suspend = true;

	return 0;
}

static void imx_mu_shutdown(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;

	if (cp->type == IMX_MU_TYPE_TXDB) {
		tasklet_kill(&cp->txdb_tasklet);
		pm_runtime_put_sync(priv->dev);
		return;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx));
		break;
	default:
		break;
	}

	free_irq(priv->irq, chan);
	pm_runtime_put_sync(priv->dev);
}

static const struct mbox_chan_ops imx_mu_ops = {
	.send_data = imx_mu_send_data,
	.startup = imx_mu_startup,
	.shutdown = imx_mu_shutdown,
};

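/*
 * The SCU flavour exposes a fixed channel map: chan 0 = TX0, chan 1 = RX0,
 * chans 2-5 = RXDB0-3. Consumers still use the generic two-cell specifier
 * <type idx>. Illustrative devicetree reference (sketch only; &lsio_mu1 is
 * an assumed node label, not taken from this file):
 *
 *	mboxes = <&lsio_mu1 0 0>, <&lsio_mu1 1 0>, <&lsio_mu1 3 3>;
 *	mbox-names = "tx", "rx", "rxdb";
 *
 * where 0/1/3 are IMX_MU_TYPE_TX/RX/RXDB and the second cell is the index.
 */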
static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
					  const struct of_phandle_args *sp)
{
	u32 type, idx, chan;

	if (sp->args_count != 2) {
		dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	type = sp->args[0]; /* channel type */
	idx = sp->args[1]; /* index */

	switch (type) {
	case IMX_MU_TYPE_TX:
	case IMX_MU_TYPE_RX:
		if (idx != 0)
			dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
		chan = type;
		break;
	case IMX_MU_TYPE_RXDB:
		chan = 2 + idx;
		break;
	default:
		dev_err(mbox->dev, "Invalid chan type: %d\n", type);
		return ERR_PTR(-EINVAL);
	}

	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
		return ERR_PTR(-EINVAL);
	}

	return &mbox->chans[chan];
}

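/*
 * Generic mapping: sixteen channels in four groups of four,
 * chan = type * 4 + idx (TX0-3, RX0-3, TXDB0-3, RXDB0-3).
 */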
static struct mbox_chan *imx_mu_xlate(struct mbox_controller *mbox,
				      const struct of_phandle_args *sp)
{
	u32 type, idx, chan;

	if (sp->args_count != 2) {
		dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	type = sp->args[0]; /* channel type */
	idx = sp->args[1]; /* index */
	chan = type * 4 + idx;

	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
		return ERR_PTR(-EINVAL);
	}

	return &mbox->chans[chan];
}

static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
	unsigned int i;

	for (i = 0; i < IMX_MU_CHANS; i++) {
		struct imx_mu_con_priv *cp = &priv->con_priv[i];

		cp->idx = i % 4;
		cp->type = i >> 2;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "imx_mu_chan[%i-%i]", cp->type, cp->idx);
	}

	priv->mbox.num_chans = IMX_MU_CHANS;
	priv->mbox.of_xlate = imx_mu_xlate;

	if (priv->side_b)
		return;

	/* Set default MU configuration */
	imx_mu_write(priv, 0, priv->dcfg->xCR);
}

static void imx_mu_init_scu(struct imx_mu_priv *priv)
{
	unsigned int i;

	for (i = 0; i < IMX_MU_SCU_CHANS; i++) {
		struct imx_mu_con_priv *cp = &priv->con_priv[i];

		cp->idx = i < 2 ? 0 : i - 2;
		cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "imx_mu_chan[%i-%i]", cp->type, cp->idx);
	}

	priv->mbox.num_chans = IMX_MU_SCU_CHANS;
	priv->mbox.of_xlate = imx_mu_scu_xlate;

	/* Set default MU configuration */
	imx_mu_write(priv, 0, priv->dcfg->xCR);
}

static int imx_mu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct imx_mu_priv *priv;
	const struct imx_mu_dcfg *dcfg;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	dcfg = of_device_get_match_data(dev);
	if (!dcfg)
		return -EINVAL;
	priv->dcfg = dcfg;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		if (PTR_ERR(priv->clk) != -ENOENT)
			return PTR_ERR(priv->clk);

		priv->clk = NULL;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(dev, "Failed to enable clock\n");
		return ret;
	}

	priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");

	priv->dcfg->init(priv);

	spin_lock_init(&priv->xcr_lock);

	priv->mbox.dev = dev;
	priv->mbox.ops = &imx_mu_ops;
	priv->mbox.chans = priv->mbox_chans;
	priv->mbox.txdone_irq = true;

	platform_set_drvdata(pdev, priv);

	ret = devm_mbox_controller_register(dev, &priv->mbox);
	if (ret) {
		clk_disable_unprepare(priv->clk);
		return ret;
	}

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		goto disable_runtime_pm;
	}

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		goto disable_runtime_pm;

	clk_disable_unprepare(priv->clk);

	priv->suspend = false;

	return 0;

disable_runtime_pm:
	pm_runtime_disable(dev);
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int imx_mu_remove(struct platform_device *pdev)
{
	struct imx_mu_priv *priv = platform_get_drvdata(pdev);

	pm_runtime_disable(priv->dev);

	return 0;
}

static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
	.tx = imx_mu_generic_tx,
	.rx = imx_mu_generic_rx,
	.init = imx_mu_init_generic,
	.xTR = {0x0, 0x4, 0x8, 0xc},
	.xRR = {0x10, 0x14, 0x18, 0x1c},
	.xSR = 0x20,
	.xCR = 0x24,
};

static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
	.tx = imx_mu_generic_tx,
	.rx = imx_mu_generic_rx,
	.init = imx_mu_init_generic,
	.xTR = {0x20, 0x24, 0x28, 0x2c},
	.xRR = {0x40, 0x44, 0x48, 0x4c},
	.xSR = 0x60,
	.xCR = 0x64,
};

static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
	.tx = imx_mu_scu_tx,
	.rx = imx_mu_scu_rx,
	.init = imx_mu_init_scu,
	.xTR = {0x0, 0x4, 0x8, 0xc},
	.xRR = {0x10, 0x14, 0x18, 0x1c},
	.xSR = 0x20,
	.xCR = 0x24,
};

static const struct of_device_id imx_mu_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
	{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
	{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
	{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);

static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);

	if (!priv->clk)
		priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);

	return 0;
}

static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);

	/*
	 * Only restore the MU registers when the context was lost: TIE may
	 * legitimately be set during noirq resume while MU data communication
	 * is still in flight, and blindly restoring the saved value would
	 * overwrite TIE, make the pending MU send fail and possibly freeze
	 * the system. This was observed when testing freeze-mode suspend.
	 */
	if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
		imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);

	return 0;
}

static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		dev_err(dev, "failed to enable clock\n");

	return ret;
}

static const struct dev_pm_ops imx_mu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
				      imx_mu_resume_noirq)
	SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
			   imx_mu_runtime_resume, NULL)
};

static struct platform_driver imx_mu_driver = {
	.probe = imx_mu_probe,
	.remove = imx_mu_remove,
	.driver = {
		.name = "imx_mu",
		.of_match_table = imx_mu_dt_ids,
		.pm = &imx_mu_pm_ops,
	},
};
module_platform_driver(imx_mu_driver);

MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Message Unit driver for i.MX");
MODULE_LICENSE("GPL v2");