IB/qib, IB/hfi1: Fix up UD loopback use of irq flags
The dual lock patch moved locking around and missed an issue with the
handling of irq flags when processing UD loopback packets. The issue
was revealed by smatch.

Fix both qib and hfi1 to pass the saved flags to the UD request
builders and to handle the flags correctly.
Fixes: 46a80d62e6 ("IB/qib, staging/rdma/hfi1: add s_hlock for use in post send")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent f39cc34df7
commit 747f4d7a9d
8 changed files with 28 additions and 25 deletions
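To make the diff easier to read, here is a minimal kernel-style sketch
of the corrected pattern, assuming a heavily simplified QP; the names
my_qp, my_make_req, my_loopback and my_send_loop are hypothetical
stand-ins and do not appear in the patch.

/*
 * Sketch only (hypothetical names, lock initialization omitted).
 * The caller saves the irq state in 'flags' when taking s_lock and
 * passes &flags down, so the builder can drop and retake the lock
 * with the caller's saved state instead of saving a second copy
 * via local_irq_save(), which is the pattern smatch flagged.
 */
#include <linux/spinlock.h>

struct my_qp {
	spinlock_t s_lock;
};

static void my_loopback(struct my_qp *qp)
{
	/* deliver the packet locally; must run without s_lock held */
}

static int my_make_req(struct my_qp *qp, unsigned long *flags)
{
	unsigned long tflags = *flags;	/* caller's saved irq state */

	spin_unlock_irqrestore(&qp->s_lock, tflags);
	my_loopback(qp);
	spin_lock_irqsave(&qp->s_lock, tflags);
	*flags = tflags;	/* hand the refreshed state back */
	return 0;		/* 0: nothing more to build */
}

static void my_send_loop(struct my_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	do {
		/* build/send one request; may drop and retake s_lock */
	} while (my_make_req(qp, &flags));
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

With the flags threaded through, the caller's final
spin_unlock_irqrestore() restores the irq state that was actually
saved when the lock was first taken. The per-file hunks follow.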
drivers/infiniband/hw/qib/qib_rc.c

@@ -230,7 +230,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
  *
  * Return 1 if constructed; otherwise, return 0.
  */
-int qib_make_rc_req(struct rvt_qp *qp)
+int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 {
 	struct qib_qp_priv *priv = qp->priv;
 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

drivers/infiniband/hw/qib/qib_ruc.c

@@ -739,7 +739,7 @@ void qib_do_send(struct rvt_qp *qp)
 	struct qib_qp_priv *priv = qp->priv;
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-	int (*make_req)(struct rvt_qp *qp);
+	int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
 	unsigned long flags;
 
 	if ((qp->ibqp.qp_type == IB_QPT_RC ||
@@ -781,7 +781,7 @@ void qib_do_send(struct rvt_qp *qp)
 			qp->s_hdrwords = 0;
 			spin_lock_irqsave(&qp->s_lock, flags);
 		}
-	} while (make_req(qp));
+	} while (make_req(qp, &flags));
 
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 }

drivers/infiniband/hw/qib/qib_uc.c

@@ -45,7 +45,7 @@
  *
  * Return 1 if constructed; otherwise, return 0.
  */
-int qib_make_uc_req(struct rvt_qp *qp)
+int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
 {
 	struct qib_qp_priv *priv = qp->priv;
 	struct qib_other_headers *ohdr;

drivers/infiniband/hw/qib/qib_ud.c

@@ -238,7 +238,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
  *
  * Return 1 if constructed; otherwise, return 0.
  */
-int qib_make_ud_req(struct rvt_qp *qp)
+int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 {
 	struct qib_qp_priv *priv = qp->priv;
 	struct qib_other_headers *ohdr;
@@ -294,7 +294,7 @@ int qib_make_ud_req(struct rvt_qp *qp)
 			this_cpu_inc(ibp->pmastats->n_unicast_xmit);
 		lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
 		if (unlikely(lid == ppd->lid)) {
-			unsigned long flags;
+			unsigned long tflags = *flags;
 			/*
 			 * If DMAs are in progress, we can't generate
 			 * a completion for the loopback packet since
@@ -307,10 +307,10 @@ int qib_make_ud_req(struct rvt_qp *qp)
 				goto bail;
 			}
 			qp->s_cur = next_cur;
-			local_irq_save(flags);
-			spin_unlock_irqrestore(&qp->s_lock, flags);
+			spin_unlock_irqrestore(&qp->s_lock, tflags);
 			qib_ud_loopback(qp, wqe);
-			spin_lock_irqsave(&qp->s_lock, flags);
+			spin_lock_irqsave(&qp->s_lock, tflags);
+			*flags = tflags;
 			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
 			goto done;
 		}

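The loopback path above is the heart of the fix: rather than saving a
second copy of the irq state with local_irq_save(), the builder now
drops s_lock using the caller's saved flags (tflags), performs the
loopback, retakes the lock, and writes the refreshed state back
through the flags pointer so the caller's final
spin_unlock_irqrestore() restores the correct irq state.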
drivers/infiniband/hw/qib/qib_verbs.h

@@ -430,11 +430,11 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 
 void qib_send_rc_ack(struct rvt_qp *qp);
 
-int qib_make_rc_req(struct rvt_qp *qp);
+int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);
 
-int qib_make_uc_req(struct rvt_qp *qp);
+int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags);
 
-int qib_make_ud_req(struct rvt_qp *qp);
+int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags);
 
 int qib_register_ib_device(struct qib_devdata *);
 
drivers/staging/rdma/hfi1/ruc.c

@@ -831,7 +831,6 @@ void hfi1_do_send(struct rvt_qp *qp)
 	struct hfi1_pkt_state ps;
 	struct hfi1_qp_priv *priv = qp->priv;
 	int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-	unsigned long flags;
 	unsigned long timeout;
 	unsigned long timeout_int;
 	int cpu;
@@ -866,11 +865,11 @@ void hfi1_do_send(struct rvt_qp *qp)
 		timeout_int = SEND_RESCHED_TIMEOUT;
 	}
 
-	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock_irqsave(&qp->s_lock, ps.flags);
 
 	/* Return if we are already busy processing a work request. */
 	if (!hfi1_send_ok(qp)) {
-		spin_unlock_irqrestore(&qp->s_lock, flags);
+		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
 		return;
 	}
 
@@ -884,7 +883,7 @@ void hfi1_do_send(struct rvt_qp *qp)
 	do {
 		/* Check for a constructed packet to be sent. */
 		if (qp->s_hdrwords != 0) {
-			spin_unlock_irqrestore(&qp->s_lock, flags);
+			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
 			/*
 			 * If the packet cannot be sent now, return and
 			 * the send tasklet will be woken up later.
@@ -897,11 +896,14 @@ void hfi1_do_send(struct rvt_qp *qp)
 			if (unlikely(time_after(jiffies, timeout))) {
 				if (workqueue_congested(cpu,
 							ps.ppd->hfi1_wq)) {
-					spin_lock_irqsave(&qp->s_lock, flags);
+					spin_lock_irqsave(
+						&qp->s_lock,
+						ps.flags);
 					qp->s_flags &= ~RVT_S_BUSY;
 					hfi1_schedule_send(qp);
-					spin_unlock_irqrestore(&qp->s_lock,
-							       flags);
+					spin_unlock_irqrestore(
+						&qp->s_lock,
+						ps.flags);
 					this_cpu_inc(
 						*ps.ppd->dd->send_schedule);
 					return;
@@ -913,11 +915,11 @@ void hfi1_do_send(struct rvt_qp *qp)
 			}
 			timeout = jiffies + (timeout_int) / 8;
 		}
-		spin_lock_irqsave(&qp->s_lock, flags);
+		spin_lock_irqsave(&qp->s_lock, ps.flags);
 		}
 	} while (make_req(qp, &ps));
 
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
 }
 
 /*

drivers/staging/rdma/hfi1/ud.c

@@ -322,7 +322,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 		     (lid == ppd->lid ||
 		      (lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
 		       qp->ibqp.qp_type == IB_QPT_GSI)))) {
-			unsigned long flags;
+			unsigned long tflags = ps->flags;
 			/*
 			 * If DMAs are in progress, we can't generate
 			 * a completion for the loopback packet since
@@ -335,10 +335,10 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 				goto bail;
 			}
 			qp->s_cur = next_cur;
-			local_irq_save(flags);
-			spin_unlock_irqrestore(&qp->s_lock, flags);
+			spin_unlock_irqrestore(&qp->s_lock, tflags);
 			ud_loopback(qp, wqe);
-			spin_lock_irqsave(&qp->s_lock, flags);
+			spin_lock_irqsave(&qp->s_lock, tflags);
+			ps->flags = tflags;
 			hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
 			goto done_free_tx;
 		}

drivers/staging/rdma/hfi1/verbs.h

@@ -215,6 +215,7 @@ struct hfi1_pkt_state {
 	struct hfi1_ibport *ibp;
 	struct hfi1_pportdata *ppd;
 	struct verbs_txreq *s_txreq;
+	unsigned long flags;
 };
 
 #define HFI1_PSN_CREDIT 16
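Note the difference in plumbing between the two drivers: qib threads
the saved irq state through a new unsigned long *flags parameter on
each request builder, whereas hfi1 carries it in the struct
hfi1_pkt_state its builders already receive, so only the struct needed
a new field.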