mmc: core: Remove mq->use_cqe from the struct mmc_queue

host->cqe_enabled already contains the information about whether CQE is
enabled or not, so there is no need to keep another copy of it around.

Signed-off-by: Luca Porzio <lporzio@micron.com>
Signed-off-by: Zhan Liu <zliua@micron.com>
Link: https://lore.kernel.org/r/20210215003217.GA12240@lupo-laptop
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Author: Luca Porzio, 2021-02-15 01:32:18 +01:00 (committed by Ulf Hansson)
commit 407a1c570f
parent 6f1d324766
3 changed files with 9 additions and 10 deletions
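
Not part of the patch: a minimal sketch of the access pattern the change relies
on, using a hypothetical helper name. Any code that holds a struct mmc_queue can
reach the authoritative flag through the card's host, which is why the cached
mq->use_cqe copy is redundant.

/* Hypothetical helper, for illustration only (not in the kernel tree). */
static inline bool mmc_queue_uses_cqe(struct mmc_queue *mq)
{
	struct mmc_host *host = mq->card->host;

	/* host->cqe_enabled is the single source of truth for CQE support. */
	return host->cqe_enabled;
}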

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c

@@ -1933,8 +1933,9 @@ static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
 void mmc_blk_mq_complete(struct request *req)
 {
 	struct mmc_queue *mq = req->q->queuedata;
+	struct mmc_host *host = mq->card->host;
 
-	if (mq->use_cqe)
+	if (host->cqe_enabled)
 		mmc_blk_cqe_complete_rq(mq, req);
 	else if (likely(!blk_should_fake_timeout(req->q)))
 		mmc_blk_mq_complete_rq(mq, req);
@@ -2179,7 +2180,7 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
 static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
 {
-	if (mq->use_cqe)
+	if (host->cqe_enabled)
 		return host->cqe_ops->cqe_wait_for_idle(host);
 
 	return mmc_blk_rw_wait(mq, NULL);
@@ -2228,7 +2229,7 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
 			break;
 		case REQ_OP_READ:
 		case REQ_OP_WRITE:
-			if (mq->use_cqe)
+			if (host->cqe_enabled)
 				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
 			else
 				ret = mmc_blk_mq_issue_rw_rq(mq, req);

diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c

@@ -60,7 +60,7 @@ enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_host *host = mq->card->host;
 
-	if (mq->use_cqe && !host->hsq_enabled)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		return mmc_cqe_issue_type(host, req);
 
 	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
@@ -127,7 +127,7 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
 	bool ignore_tout;
 
 	spin_lock_irqsave(&mq->lock, flags);
-	ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
+	ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
 	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
@@ -144,7 +144,7 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
 	mq->in_recovery = true;
 
-	if (mq->use_cqe && !host->hsq_enabled)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		mmc_blk_cqe_recovery(mq);
 	else
 		mmc_blk_mq_recovery(mq);
@@ -315,7 +315,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (get_card)
 		mmc_get_card(card, &mq->ctx);
 
-	if (mq->use_cqe) {
+	if (host->cqe_enabled) {
 		host->retune_now = host->need_retune && cqe_retune_ok &&
 				   !host->hold_retune;
 	}
@@ -430,7 +430,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
 	int ret;
 
 	mq->card = card;
-	mq->use_cqe = host->cqe_enabled;
 
 	spin_lock_init(&mq->lock);
@@ -440,7 +439,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
 	 * The queue depth for CQE must match the hardware because the request
 	 * tag is used to index the hardware queue.
 	 */
-	if (mq->use_cqe && !host->hsq_enabled)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		mq->tag_set.queue_depth =
 			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
 	else

diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h

@@ -82,7 +82,6 @@ struct mmc_queue {
 	unsigned int		cqe_busy;
 #define MMC_CQE_DCMD_BUSY	BIT(0)
 	bool			busy;
-	bool			use_cqe;
 	bool			recovery_needed;
 	bool			in_recovery;
 	bool			rw_wait;