// SPDX-License-Identifier: GPL-2.0
/*
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

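/*
 * Re-issue the in-flight request from process context. This work is
 * scheduled when the host's ->request_atomic() callback returned -EBUSY,
 * since a busy card should not be retried in atomic context.
 */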
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}

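/*
 * Dispatch the next queued request to the host controller, unless a
 * request is already in flight or the queue is disabled. The host's
 * atomic issue path is preferred when one is provided.
 */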
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returned BUSY, the card may be busy now, so
	 * switch to non-atomic context to retry this unusual case, which
	 * avoids time-consuming operations in atomic context.
	 *
	 * Note: we just give a warning for other error cases, since the
	 * host driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}

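/*
 * Choose the tag of the next request to dispatch: try the slot right
 * after the current tag first, otherwise scan all slots; an empty queue
 * yields HSQ_INVALID_TAG.
 */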
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	struct hsq_slot *slot;
	int tag;

	/*
	 * If there are no remaining requests in the software queue, set an
	 * invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		return;
	}

	/*
	 * Increase the next tag and check if the corresponding request is
	 * available; if so, we have found a candidate request.
	 */
	if (++hsq->next_tag != HSQ_INVALID_TAG) {
		slot = &hsq->slot[hsq->next_tag];
		if (slot->mrq)
			return;
	}

	/* Otherwise iterate over all slots to find an available tag. */
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		slot = &hsq->slot[tag];
		if (slot->mrq)
			break;
	}

	if (tag == HSQ_NUM_SLOTS)
		tag = HSQ_INVALID_TAG;

	hsq->next_tag = tag;
}

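/*
 * Complete the bookkeeping for a finished request: clear the in-flight
 * marker, pick the next tag, wake up any idle waiters and, unless error
 * recovery is in progress, pump the next queued request.
 */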
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the current completed slot request to make room for a new
	 * request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

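/* Halt the software queue so no new requests are pumped during recovery. */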
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

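/* Leave recovery mode and restart pumping any requests queued meanwhile. */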
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

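/*
 * The ->cqe_request() implementation: place the request into the slot
 * indexed by its tag and try to dispatch it immediately.
 */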
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag to the current request tag if there is no
	 * available next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG)
		hsq->next_tag = tag;

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}

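/* Forward post-processing (e.g. DMA unmapping) to the host driver. */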
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

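/*
 * Check whether the queue is idle (no request in flight and nothing
 * queued) or halted for recovery; report -EBUSY via @ret in the latter
 * case and record whether a caller is still waiting for idle.
 */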
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

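/* Block until the software queue is idle or halted for recovery. */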
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

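/*
 * Wait up to 500ms for the queue to drain, then mark it disabled so
 * that no further requests are accepted.
 */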
static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

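/* Mark the software queue as enabled; fails with -EBUSY if already enabled. */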
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

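/**
 * mmc_hsq_init - initialize the MMC host software queue
 * @hsq: the software queue structure to initialize
 * @mmc: the host controller
 *
 * Allocate the request slots and register the software queue as the
 * host's command queue engine (CQE) implementation.
 *
 * Return 0 on success, or -ENOMEM if slot allocation fails.
 */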
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);
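
/*
 * Illustrative usage sketch (not part of this file): a host driver is
 * expected to allocate a struct mmc_hsq and register it at probe time.
 * The identifiers pdev, host and ret below are hypothetical.
 *
 *	struct mmc_hsq *hsq;
 *
 *	hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
 *	if (!hsq)
 *		return -ENOMEM;
 *
 *	ret = mmc_hsq_init(hsq, host->mmc);
 *	if (ret)
 *		return ret;
 *
 * The driver then calls mmc_hsq_finalize_request() from its request
 * completion path, and mmc_hsq_suspend()/mmc_hsq_resume() from its
 * system PM callbacks, as defined below.
 */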

void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");