IB/hfi1: Fix deadlock with txreq allocation slow path

A failure in the get_txreq() inline will result in a
slow path retry using __get_txreq().

__get_txreq() attempts to acquire the qp's s_lock, which
is already held in all callers, so the slow path deadlocks
trying to take a lock it already owns.

Fix by deleting the s_lock acquire and release in
__get_txreq() and adding sparse __must_hold() annotations
to future-proof the code.

Cc: Stable <stable@vger.kernel.org> # 4.6+
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Author:    Mike Marciniszyn <mike.marciniszyn@intel.com>
Date:      2016-06-17 19:17:49 -07:00
Committer: Doug Ledford <dledford@redhat.com>
Parent:    34d351f8dd
Commit:    2aee309d3e

2 changed files with 2 additions and 3 deletions
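
As context for the diff below (this sketch is not part of the commit): the bug is the
classic self-deadlock of re-acquiring a non-recursive lock the caller already holds.
A minimal user-space sketch, using a pthread mutex in place of qp->s_lock and
hypothetical slow_path_broken()/slow_path_fixed() helpers, shows the broken pattern
and the convention the fix adopts (the slow path asserts the locking requirement
instead of taking the lock again):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for qp->s_lock.  Like kernel spinlocks, pthread mutexes are
 * not recursive: locking one you already hold blocks forever.
 */
static pthread_mutex_t s_lock = PTHREAD_MUTEX_INITIALIZER;

/* Broken slow path: takes s_lock even though every caller already holds
 * it -- the same pattern as the spin_lock_irqsave() the patch removes.
 */
static void slow_path_broken(void)
{
        pthread_mutex_lock(&s_lock);    /* self-deadlock: never returns */
        /* ... retry the allocation ... */
        pthread_mutex_unlock(&s_lock);
}

/* Fixed slow path: runs under the caller's lock and only documents the
 * requirement; in the kernel that comment becomes a sparse
 * __must_hold() annotation the checker can verify.
 */
static void slow_path_fixed(void)       /* caller must hold s_lock */
{
        /* ... retry the allocation, still under the caller's lock ... */
}

int main(void)
{
        pthread_mutex_lock(&s_lock);    /* fast path acquires the lock */
        if (0)
                slow_path_broken();     /* calling this would hang here */
        slow_path_fixed();              /* safe: lock held, not re-taken */
        pthread_mutex_unlock(&s_lock);
        printf("no deadlock\n");
        return 0;
}

Build with cc -pthread; the only point is the calling convention: once the fast
path owns the lock, the slow path must rely on it rather than acquire it again.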


@@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
 
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                 struct rvt_qp *qp)
+        __must_hold(&qp->s_lock)
 {
         struct verbs_txreq *tx = ERR_PTR(-EBUSY);
-        unsigned long flags;
 
-        spin_lock_irqsave(&qp->s_lock, flags);
         write_seqlock(&dev->iowait_lock);
         if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                 struct hfi1_qp_priv *priv;
@@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
         }
 out:
         write_sequnlock(&dev->iowait_lock);
-        spin_unlock_irqrestore(&qp->s_lock, flags);
         return tx;
 }


@@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
                                             struct rvt_qp *qp)
+        __must_hold(&qp->slock)
 {
         struct verbs_txreq *tx;
         struct hfi1_qp_priv *priv = qp->priv;
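
For reference, __must_hold() as used above is one of the kernel's sparse lock-context
annotations: it declares that the function must be entered and exited with the named
lock held, and it compiles away entirely in normal builds.  A rough sketch of the
definitions, approximating include/linux/compiler.h of that era (check the actual
tree you build against; __CHECKER__ is set only when sparse processes the file):

#ifdef __CHECKER__
# define __must_hold(x) __attribute__((context(x, 1, 1)))  /* lock held on entry and on exit */
# define __acquires(x)  __attribute__((context(x, 0, 1)))  /* lock taken inside the function */
# define __releases(x)  __attribute__((context(x, 1, 0)))  /* lock dropped inside the function */
#else
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
#endif

Running sparse over the driver (make C=1 for files being rebuilt, C=2 for everything)
then flags context imbalances, e.g. an annotated function that returns having dropped
the lock, which is the kind of mistake the "future proof" annotations are meant to catch.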