scsi: lpfc: Release hbalock before calling lpfc_worker_wake_up()

[ Upstream commit ded20192df ]

lpfc_worker_wake_up() calls the lpfc_work_done() routine, which takes the
hbalock.  Thus, lpfc_worker_wake_up() should not be called while holding the
hbalock to avoid potential deadlock.
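
For illustration only (not part of the patch), the ordering applied at each
call site is: queue the event on phba->work_list while holding hbalock,
release the lock, and only then wake the worker, so that lpfc_work_done()
can re-take hbalock without deadlocking:

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* hbalock is no longer held, so the worker may take it freely */
	lpfc_worker_wake_up(phba);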

Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20240305200503.57317-7-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
 3 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4384,23 +4384,23 @@ lpfc_els_retry_delay(struct timer_list *t)
 	unsigned long flags;
 	struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
 
+	/* Hold a node reference for outstanding queued work */
+	if (!lpfc_nlp_get(ndlp))
+		return;
+
 	spin_lock_irqsave(&phba->hbalock, flags);
 	if (!list_empty(&evtp->evt_listp)) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_nlp_put(ndlp);
 		return;
 	}
 
-	/* We need to hold the node by incrementing the reference
-	 * count until the queued work is done
-	 */
-	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
-	if (evtp->evt_arg1) {
-		evtp->evt = LPFC_EVT_ELS_RETRY;
-		list_add_tail(&evtp->evt_listp, &phba->work_list);
-		lpfc_worker_wake_up(phba);
-	}
+	evtp->evt_arg1 = ndlp;
+	evtp->evt = LPFC_EVT_ELS_RETRY;
+	list_add_tail(&evtp->evt_listp, &phba->work_list);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
-	return;
+
+	lpfc_worker_wake_up(phba);
 }
 
 /**

diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -241,7 +241,9 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 		if (evtp->evt_arg1) {
 			evtp->evt = LPFC_EVT_DEV_LOSS;
 			list_add_tail(&evtp->evt_listp, &phba->work_list);
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			lpfc_worker_wake_up(phba);
+			return;
 		}
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
 	} else {
@@ -259,10 +261,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 				lpfc_disc_state_machine(vport, ndlp, NULL,
 							NLP_EVT_DEVICE_RM);
 		}
-
 	}
-
-	return;
 }
 
 /**

diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1217,9 +1217,9 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	empty = list_empty(&phba->active_rrq_list);
 	list_add_tail(&rrq->list, &phba->active_rrq_list);
 	phba->hba_flag |= HBA_RRQ_ACTIVE;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	if (empty)
 		lpfc_worker_wake_up(phba);
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	return 0;
 out:
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -11361,18 +11361,18 @@ lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
 	unsigned long iflags;
 	struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
 
+	/* Hold a node reference for outstanding queued work */
+	if (!lpfc_nlp_get(ndlp))
+		return;
+
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (!list_empty(&evtp->evt_listp)) {
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		lpfc_nlp_put(ndlp);
 		return;
 	}
 
-	/* Incrementing the reference count until the queued work is done. */
-	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
-	if (!evtp->evt_arg1) {
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-		return;
-	}
+	evtp->evt_arg1 = ndlp;
 	evtp->evt = LPFC_EVT_RECOVER_PORT;
 	list_add_tail(&evtp->evt_listp, &phba->work_list);
 	spin_unlock_irqrestore(&phba->hbalock, iflags);