// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_REG_TYPE		1
#define CMDQ_JUMP_RELATIVE	1

struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
		struct {
			u16 arg_c;
			u16 src_reg;
		};
	};
	union {
		u16 offset;
		u16 event;
		u16 reg_dst;
	};
	union {
		u8 subsys;
		struct {
			u8 sop:5;
			u8 arg_c_t:1;
			u8 src_t:1;
			u8 dst_t:1;
		};
	};
	u8 op;
};

int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
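/*
 * Illustrative only: the helper above expects the consumer node to carry a
 * "mediatek,gce-client-reg" phandle followed by three cells (subsys, offset,
 * size), which it unpacks into struct cmdq_client_reg. The snippet below
 * uses made-up placeholder values, not a real device tree:
 *
 *	mediatek,gce-client-reg = <&gce SUBSYS_EXAMPLE 0x0000 0x1000>;
 *
 *	struct cmdq_client_reg reg;
 *	int ret = cmdq_dev_get_client_reg(dev, &reg, 0);
 */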

static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);
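/*
 * Typical packet lifecycle, sketched for illustration (the buffer size and
 * the write arguments are placeholders, not values required by this API):
 *
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(client, PAGE_SIZE);
 *
 *	if (!IS_ERR(pkt)) {
 *		cmdq_pkt_write(pkt, subsys, offset, value);
 *		cmdq_pkt_finalize(pkt);
 *		cmdq_pkt_flush(pkt);
 *		cmdq_pkt_destroy(pkt);
 *	}
 */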

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used up,
		 * the real required size (pkt->cmd_buf_size) keeps
		 * increasing, so that the user knows how much memory should
		 * ultimately be allocated after appending all commands and
		 * flushing the command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer
		 * size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %zu is too small!\n",
			  __func__, pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}
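/*
 * Illustrative sketch of the size-probe pattern described in the comment
 * above (build_commands() and small_guess are hypothetical names): appending
 * keeps growing pkt->cmd_buf_size even after the buffer is exhausted, so a
 * caller can read back the final size and recreate the packet with it:
 *
 *	pkt = cmdq_pkt_create(client, small_guess);
 *	build_commands(pkt);
 *	if (pkt->cmd_buf_size > pkt->buf_size) {
 *		size_t needed = pkt->cmd_buf_size;
 *
 *		cmdq_pkt_destroy(pkt);
 *		pkt = cmdq_pkt_create(client, needed);
 *		build_commands(pkt);
 *	}
 */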

int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
		    u16 reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_READ_S;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.reg_dst = reg_idx;
	inst.src_reg = addr_low;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_read_s);
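/*
 * Illustrative pairing for cmdq_pkt_read_s() above, assuming the
 * CMDQ_ADDR_HIGH()/CMDQ_ADDR_LOW() helpers and the CMDQ_THR_SPR_IDXn
 * register indices from the mtk-cmdq headers: the high half of a bus
 * address is first loaded into an internal GCE register, which is then
 * passed as @high_addr_reg_idx:
 *
 *	cmdq_pkt_assign(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_HIGH(pa));
 *	cmdq_pkt_read_s(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_LOW(pa),
 *			CMDQ_THR_SPR_IDX1);
 */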

int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
		     u16 addr_low, u16 src_reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s);

int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
			  u16 addr_low, u16 src_reg_idx, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.mask = 0;
	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask);

int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
			   u16 addr_low, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_value);

int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
				u16 addr_low, u32 value, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);

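/*
 * Event helpers, summarized for readers of this file: cmdq_pkt_wfe() appends
 * a wait-for-event instruction and, when @clear is set, also clears the event
 * once it has arrived, so a later wait blocks until the producer signals it
 * again. cmdq_pkt_clear_event() and cmdq_pkt_set_event() update an event
 * without waiting on it.
 */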
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
	struct cmdq_instruction inst = { {0} };
	u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION | clear_option;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);

int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.reg_dst = reg_idx;
	inst.value = value;
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_assign);

int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_JUMP;
	inst.offset = CMDQ_JUMP_RELATIVE;
	inst.value = addr >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_jump);

int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_finalize);

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
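/*
 * Illustrative async usage (the my_flush_done() callback is hypothetical and
 * not part of this file): the packet is finalized first, and is only
 * destroyed from the completion callback once the hardware is done with it.
 *
 *	static void my_flush_done(struct cmdq_cb_data data)
 *	{
 *		struct cmdq_pkt *pkt = data.data;
 *
 *		cmdq_pkt_destroy(pkt);
 *	}
 *
 *	cmdq_pkt_finalize(pkt);
 *	cmdq_pkt_flush_async(pkt, my_flush_done, pkt);
 */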

struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}

int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

MODULE_LICENSE("GPL v2");