// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: cregs.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/completion.h>
#include <linux/slab.h>

#include "rsxx_priv.h"

#define CREG_TIMEOUT_MSEC 10000

typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
			    struct creg_cmd *cmd,
			    int st);

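/*
 * A single queued creg access.
 *
 * @list:	linkage on creg_ctrl.queue
 * @cb:		completion callback, invoked with the result status
 * @cb_private:	opaque data handed back to @cb
 * @op:		CREG_OP_READ or CREG_OP_WRITE
 * @addr:	creg address to access
 * @cnt8:	transfer length in bytes (at most MAX_CREG_DATA8)
 * @buf:	data buffer for the transfer
 * @stream:	non-zero for byte-stream mode (byte swapped on little endian)
 * @status:	CREG_STAT value captured when the command completes
 */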
struct creg_cmd {
	struct list_head list;
	creg_cmd_cb cb;
	void *cb_private;
	unsigned int op;
	unsigned int addr;
	int cnt8;
	void *buf;
	unsigned int stream;
	unsigned int status;
};

static struct kmem_cache *creg_cmd_pool;


/*------------ Private Functions --------------*/

#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianness!!! Aborting...
#endif

static int copy_to_creg_data(struct rsxx_cardinfo *card,
			     int cnt8,
			     void *buf,
			     unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	if (unlikely(card->eeh_state))
		return -EIO;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			iowrite32be(data[i], card->regmap + CREG_DATA(i));
		else
			iowrite32(data[i], card->regmap + CREG_DATA(i));
	}

	return 0;
}

static int copy_from_creg_data(struct rsxx_cardinfo *card,
			       int cnt8,
			       void *buf,
			       unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	if (unlikely(card->eeh_state))
		return -EIO;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			data[i] = ioread32be(card->regmap + CREG_DATA(i));
		else
			data[i] = ioread32(card->regmap + CREG_DATA(i));
	}

	return 0;
}

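/*
 * Push one command to the hardware: program the address and count
 * registers, copy any write data into the data window, then write the
 * opcode to CREG_CMD, which sets the valid bit and starts the command.
 * Completion is reported asynchronously and handled later by
 * creg_cmd_done().
 */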
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
	int st;

	if (unlikely(card->eeh_state))
		return;

	iowrite32(cmd->addr, card->regmap + CREG_ADD);
	iowrite32(cmd->cnt8, card->regmap + CREG_CNT);

	if (cmd->op == CREG_OP_WRITE) {
		if (cmd->buf) {
			st = copy_to_creg_data(card, cmd->cnt8,
					       cmd->buf, cmd->stream);
			if (st)
				return;
		}
	}

	if (unlikely(card->eeh_state))
		return;

	/* Setting the valid bit will kick off the command. */
	iowrite32(cmd->op, card->regmap + CREG_CMD);
}

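/*
 * Start the next queued command if nothing is currently active. The
 * caller must hold creg_ctrl.lock.
 */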
static void creg_kick_queue(struct rsxx_cardinfo *card)
{
	if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
		return;

	card->creg_ctrl.active = 1;
	card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
						      struct creg_cmd, list);
	list_del(&card->creg_ctrl.active_cmd->list);
	card->creg_ctrl.q_depth--;

	/*
	 * We have to set the timer before we push the new command. Otherwise,
	 * we could create a race condition that would occur if the timer
	 * was not canceled, and expired after the new command was pushed,
	 * but before the command was issued to hardware.
	 */
	mod_timer(&card->creg_ctrl.cmd_timer,
		  jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

	creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}

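/*
 * Allocate a command, append it to the queue and kick the queue. The
 * callback (if any) runs when the command completes, times out or is
 * cancelled; the command itself is freed by the code that invokes the
 * callback, so callbacks must not free it.
 */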
static int creg_queue_cmd(struct rsxx_cardinfo *card,
			  unsigned int op,
			  unsigned int addr,
			  unsigned int cnt8,
			  void *buf,
			  int stream,
			  creg_cmd_cb callback,
			  void *cb_private)
{
	struct creg_cmd *cmd;

	/* Don't queue stuff up if we're halted. */
	if (unlikely(card->halt))
		return -EINVAL;

	if (card->creg_ctrl.reset)
		return -EAGAIN;

	if (cnt8 > MAX_CREG_DATA8)
		return -EINVAL;

	cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->list);

	cmd->op = op;
	cmd->addr = addr;
	cmd->cnt8 = cnt8;
	cmd->buf = buf;
	cmd->stream = stream;
	cmd->cb = callback;
	cmd->cb_private = cb_private;
	cmd->status = 0;

	spin_lock_bh(&card->creg_ctrl.lock);
	list_add_tail(&cmd->list, &card->creg_ctrl.queue);
	card->creg_ctrl.q_depth++;
	creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);

	return 0;
}

static void creg_cmd_timed_out(struct timer_list *t)
{
	struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
	struct creg_cmd *cmd;

	spin_lock(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock(&card->creg_ctrl.lock);

	if (cmd == NULL) {
		card->creg_ctrl.creg_stats.creg_timeout++;
		dev_warn(CARD_TO_DEV(card),
			 "No active command associated with timeout!\n");
		return;
	}

	if (cmd->cb)
		cmd->cb(card, cmd, -ETIMEDOUT);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.lock);
}

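/*
 * Work handler run when the hardware reports creg completion: cancel the
 * timeout timer, take ownership of the active command, read back the
 * status (and, for reads, the data), invoke the callback and start the
 * next queued command.
 */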
static void creg_cmd_done(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	struct creg_cmd *cmd;
	int st = 0;

	card = container_of(work, struct rsxx_cardinfo,
			    creg_ctrl.done_work);

	/*
	 * The timer could not be cancelled for some reason,
	 * race to pop the active command.
	 */
	if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
		card->creg_ctrl.creg_stats.failed_cancel_timer++;

	spin_lock_bh(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock_bh(&card->creg_ctrl.lock);

	if (cmd == NULL) {
		dev_err(CARD_TO_DEV(card),
			"Spurious creg interrupt!\n");
		return;
	}

	card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
	cmd->status = card->creg_ctrl.creg_stats.stat;
	if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
		dev_err(CARD_TO_DEV(card),
			"Invalid status on creg command\n");
		/*
		 * At this point we're probably reading garbage from HW. Don't
		 * do anything else that could mess up the system and let
		 * the sync function return an error.
		 */
		st = -EIO;
		goto creg_done;
	} else if (cmd->status & CREG_STAT_ERROR) {
		st = -EIO;
	}

	if (cmd->op == CREG_OP_READ) {
		unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);

		/* Paranoid Sanity Checks */
		if (!cmd->buf) {
			dev_err(CARD_TO_DEV(card),
				"Buffer not given for read.\n");
			st = -EIO;
			goto creg_done;
		}
		if (cnt8 != cmd->cnt8) {
			dev_err(CARD_TO_DEV(card),
				"count mismatch\n");
			st = -EIO;
			goto creg_done;
		}

		st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
	}

creg_done:
	if (cmd->cb)
		cmd->cb(card, cmd, st);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock_bh(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);
}

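/*
 * Recover the creg interface after it has stopped responding: mask the
 * creg and event interrupts, fail every queued and active command with
 * -ECANCELED, then re-enable the interrupts and accept new commands.
 */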
static void creg_reset(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;
	struct creg_cmd *tmp;
	unsigned long flags;

	/*
	 * mutex_trylock is used here because if reset_lock is taken then a
	 * reset is already happening. So, we can just go ahead and return.
	 */
	if (!mutex_trylock(&card->creg_ctrl.reset_lock))
		return;

	card->creg_ctrl.reset = 1;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	dev_warn(CARD_TO_DEV(card),
		 "Resetting creg interface for recovery\n");

	/* Cancel outstanding commands */
	spin_lock_bh(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		card->creg_ctrl.q_depth--;
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
	}

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);

		card->creg_ctrl.active = 0;
	}
	spin_unlock_bh(&card->creg_ctrl.lock);

	card->creg_ctrl.reset = 0;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	mutex_unlock(&card->creg_ctrl.reset_lock);
}

/* Used for synchronous accesses */
struct creg_completion {
	struct completion *cmd_done;
	int st;
	u32 creg_status;
};

static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	struct creg_completion *cmd_completion;

	cmd_completion = cmd->cb_private;
	BUG_ON(!cmd_completion);

	cmd_completion->st = st;
	cmd_completion->creg_status = cmd->status;
	complete(cmd_completion->cmd_done);
}

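/*
 * Synchronous helper: queue a single command with creg_cmd_done_cb() as
 * its callback and sleep on an on-stack completion until it finishes.
 * The wait itself is bounded so that a broken timeout timer cannot hang
 * the caller indefinitely.
 */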
static int __issue_creg_rw(struct rsxx_cardinfo *card,
			   unsigned int op,
			   unsigned int addr,
			   unsigned int cnt8,
			   void *buf,
			   int stream,
			   unsigned int *hw_stat)
{
	DECLARE_COMPLETION_ONSTACK(cmd_done);
	struct creg_completion completion;
	unsigned long timeout;
	int st;

	completion.cmd_done = &cmd_done;
	completion.st = 0;
	completion.creg_status = 0;

	st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
			    &completion);
	if (st)
		return st;

	/*
	 * This timeout is necessary for unresponsive hardware. The extra
	 * 20 seconds is added to guarantee that every queued creg request
	 * has time to complete.
	 */
	timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
				   card->creg_ctrl.q_depth + 20000);

	/*
	 * The creg interface is guaranteed to complete. It has a timeout
	 * mechanism that will kick in if hardware does not respond.
	 */
	st = wait_for_completion_timeout(completion.cmd_done, timeout);
	if (st == 0) {
		/*
		 * This is really bad, because the kernel timer did not
		 * expire and notify us of a timeout!
		 */
		dev_crit(CARD_TO_DEV(card),
			 "cregs timer failed\n");
		creg_reset(card);
		return -EIO;
	}

	*hw_stat = completion.creg_status;

	if (completion.st) {
		/*
		 * This dummy read is needed to verify that no severe errors
		 * (e.g. EEH) have occurred. iowrite32() will not detect EEH
		 * errors, so it is necessary to recover here if such an
		 * error is the reason for the failure.
		 */
		ioread32(card->regmap + SCRATCH);

		dev_warn(CARD_TO_DEV(card),
			 "creg command failed(%d x%08x)\n",
			 completion.st, addr);
		return completion.st;
	}

	return 0;
}

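/*
 * Break a transfer into MAX_CREG_DATA8-sized chunks and issue them
 * synchronously, one after another.
 */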
static int issue_creg_rw(struct rsxx_cardinfo *card,
			 u32 addr,
			 unsigned int size8,
			 void *data,
			 int stream,
			 int read)
{
	unsigned int hw_stat;
	unsigned int xfer;
	unsigned int op;
	int st;

	op = read ? CREG_OP_READ : CREG_OP_WRITE;

	do {
		xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);

		st = __issue_creg_rw(card, op, addr, xfer,
				     data, stream, &hw_stat);
		if (st)
			return st;

		data = (char *)data + xfer;
		addr += xfer;
		size8 -= xfer;
	} while (size8);

	return 0;
}

/* ---------------------------- Public API ---------------------------------- */
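/*
 * Callers pass a byte count (size8) and, via byte_stream, whether the
 * access is a byte-stream transfer (byte swapped on little endian hosts).
 * Transfers larger than MAX_CREG_DATA8 are split up by issue_creg_rw().
 * A minimal sketch of a synchronous read, modelled on the helpers below:
 *
 *	unsigned int state;
 *	int st = rsxx_creg_read(card, CREG_ADD_CARD_STATE,
 *				sizeof(state), &state, 0);
 */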
int rsxx_creg_write(struct rsxx_cardinfo *card,
		    u32 addr,
		    unsigned int size8,
		    void *data,
		    int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}

int rsxx_creg_read(struct rsxx_cardinfo *card,
		   u32 addr,
		   unsigned int size8,
		   void *data,
		   int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}

int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
	return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
			      sizeof(*state), state, 0);
}

int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
	unsigned int size;
	int st;

	st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
			    sizeof(size), &size, 0);
	if (st)
		return st;

	*size8 = (u64)size * RSXX_HW_BLK_SIZE;
	return 0;
}

int rsxx_get_num_targets(struct rsxx_cardinfo *card,
			 unsigned int *n_targets)
{
	return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
			      sizeof(*n_targets), n_targets, 0);
}

int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
			       u32 *capabilities)
{
	return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
			      sizeof(*capabilities), capabilities, 0);
}

int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
	return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
			       sizeof(cmd), &cmd, 0);
}

/*----------------- HW Log Functions -------------------*/
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level.
	 */
	if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
		level = str[1];
		str += 3; /* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	default:
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}

/*
 * The substrncpy() function copies up to count bytes of the src string
 * (including the terminating '\0' character, if it is reached) into dest.
 * Returns the number of bytes copied to dest.
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int max_cnt = count;

	while (count) {
		count--;
		*dest = *src;
		if (*dest == '\0')
			break;
		src++;
		dest++;
	}
	return max_cnt - count;
}

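/*
 * Completion callback for a HW log read: copy message fragments into
 * card->log.buf, flush each complete (NUL-terminated) message, or a full
 * buffer, to the kernel log via hw_log_msg(), and queue another read if
 * the hardware reports more log data pending.
 */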
static void read_hw_log_done(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	char *buf;
	char *log_str;
	int cnt;
	int len;
	int off;

	buf = cmd->buf;
	off = 0;

	/* Failed getting the log message */
	if (st)
		return;

	while (off < cmd->cnt8) {
		log_str = &card->log.buf[card->log.buf_len];
		cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
		len = substrncpy(log_str, &buf[off], cnt);

		off += len;
		card->log.buf_len += len;

		/*
		 * Flush the log if we've hit the end of a message or if we've
		 * run out of buffer space.
		 */
		if ((log_str[len - 1] == '\0') ||
		    (card->log.buf_len == LOG_BUF_SIZE8)) {
			if (card->log.buf_len != 1) /* Don't log blank lines. */
				hw_log_msg(card, card->log.buf,
					   card->log.buf_len);
			card->log.buf_len = 0;
		}
	}

	if (cmd->status & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}

int rsxx_read_hw_log(struct rsxx_cardinfo *card)
{
	int st;

	st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
			    sizeof(card->log.tmp), card->log.tmp,
			    1, read_hw_log_done, NULL);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed getting log text\n");

	return st;
}

/*-------------- IOCTL REG Access ------------------*/
static int issue_reg_cmd(struct rsxx_cardinfo *card,
			 struct rsxx_reg_access *cmd,
			 int read)
{
	unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;

	return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
			       cmd->stream, &cmd->stat);
}

int rsxx_reg_access(struct rsxx_cardinfo *card,
		    struct rsxx_reg_access __user *ucmd,
		    int read)
{
	struct rsxx_reg_access cmd;
	int st;

	st = copy_from_user(&cmd, ucmd, sizeof(cmd));
	if (st)
		return -EFAULT;

	if (cmd.cnt > RSXX_MAX_REG_CNT)
		return -EFAULT;

	st = issue_reg_cmd(card, &cmd, read);
	if (st)
		return st;

	st = put_user(cmd.stat, &ucmd->stat);
	if (st)
		return -EFAULT;

	if (read) {
		st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
		if (st)
			return -EFAULT;
	}

	return 0;
}

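/*
 * Called on the EEH error-recovery path: put the command that was in
 * flight back at the head of the queue so it is re-issued once the queue
 * is kicked again (see rsxx_kick_creg_queue() below).
 */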
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;

	if (cmd) {
		del_timer_sync(&card->creg_ctrl.cmd_timer);

		spin_lock_bh(&card->creg_ctrl.lock);
		list_add(&cmd->list, &card->creg_ctrl.queue);
		card->creg_ctrl.q_depth++;
		card->creg_ctrl.active = 0;
		spin_unlock_bh(&card->creg_ctrl.lock);
	}
}

void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
{
	spin_lock_bh(&card->creg_ctrl.lock);
	if (!list_empty(&card->creg_ctrl.queue))
		creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);
}

/*------------ Initialization & Setup --------------*/
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
	card->creg_ctrl.active_cmd = NULL;

	card->creg_ctrl.creg_wq =
		create_singlethread_workqueue(DRIVER_NAME"_creg");
	if (!card->creg_ctrl.creg_wq)
		return -ENOMEM;

	INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
	mutex_init(&card->creg_ctrl.reset_lock);
	INIT_LIST_HEAD(&card->creg_ctrl.queue);
	spin_lock_init(&card->creg_ctrl.lock);
	timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);

	return 0;
}

void rsxx_creg_destroy(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd;
	struct creg_cmd *tmp;
	int cnt = 0;

	/* Cancel outstanding commands */
	spin_lock_bh(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
		cnt++;
	}

	if (cnt)
		dev_info(CARD_TO_DEV(card),
			 "Canceled %d queued creg commands\n", cnt);

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		dev_info(CARD_TO_DEV(card),
			 "Canceled active creg command\n");
		kmem_cache_free(creg_cmd_pool, cmd);
	}
	spin_unlock_bh(&card->creg_ctrl.lock);

	cancel_work_sync(&card->creg_ctrl.done_work);
}

int rsxx_creg_init(void)
{
	creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
	if (!creg_cmd_pool)
		return -ENOMEM;

	return 0;
}

void rsxx_creg_cleanup(void)
{
	kmem_cache_destroy(creg_cmd_pool);
}