IB/qib, staging/rdma/hfi1, IB/rdmavt: progress selection changes

The non-rdmavt versions of qib and hfi1 allow a per-driver heuristic
to override scheduled progress in favor of a direct call to the
progress routine.

This patch adds that capability to both drivers and to rdmavt.

Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Authored by Mike Marciniszyn on 2016-02-14 12:45:44 -08:00; committed by Doug Ledford
parent 14553ca110
commit 91702b4a39
3 changed files with 14 additions and 6 deletions
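For orientation, the return-value contract this patch establishes for the check_send_wqe driver hook can be sketched as follows. This is an illustrative example, not part of the patch: example_check_send_wqe and direct_threshold are hypothetical names; qib's real hook hints direct progress for UD-type sends, while hfi1's compares the wqe length against its piothreshold tunable.

/*
 * Hypothetical driver hook illustrating the contract introduced here:
 *   < 0 -> reject the wqe (the post fails with that error)
 *     0 -> accept the wqe, keep the normal scheduled progress
 *     1 -> accept the wqe and hint that a direct call to the
 *          progress routine is preferred
 */
static int example_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
        const u32 direct_threshold = 256; /* stand-in for a driver tunable */

        if (wqe->length > (1U << qp->log_pmtu))
                return -EINVAL; /* larger than the path MTU allows */

        /* small sends are cheap enough to progress immediately */
        return wqe->length <= direct_threshold;
}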


@@ -484,12 +484,13 @@ void qib_get_credit(struct rvt_qp *qp, u32 aeth)
  * the ring but after the wqe has been
  * setup.
  *
- * Returns 0 on success, -EINVAL on failure
+ * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
  */
 int qib_check_send_wqe(struct rvt_qp *qp,
                        struct rvt_swqe *wqe)
 {
        struct rvt_ah *ah;
+       int ret = 0;
 
        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
@@ -503,11 +504,13 @@ int qib_check_send_wqe(struct rvt_qp *qp,
                ah = ibah_to_rvtah(wqe->ud_wr.ah);
                if (wqe->length > (1 << ah->log_pmtu))
                        return -EINVAL;
+               /* progress hint */
+               ret = 1;
                break;
        default:
                break;
        }
-       return 0;
+       return ret;
 }
 
 #ifdef CONFIG_DEBUG_FS


@@ -1430,7 +1430,9 @@ static inline u32 qp_get_savail(struct rvt_qp *qp)
  * @qp: the QP to post on
  * @wr: the work request to send
  */
-static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
+static int rvt_post_one_wr(struct rvt_qp *qp,
+                           struct ib_send_wr *wr,
+                           int *call_send)
 {
        struct rvt_swqe *wqe;
        u32 next;
@@ -1532,8 +1534,10 @@ static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
        /* general part of wqe valid - allow for driver checks */
        if (rdi->driver_f.check_send_wqe) {
                ret = rdi->driver_f.check_send_wqe(qp, wqe);
-               if (ret)
+               if (ret < 0)
                        goto bail_inval_free;
+               if (ret)
+                       *call_send = ret;
        }
 
        log_pmtu = qp->log_pmtu;
@@ -1606,7 +1610,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
 
        for (; wr; wr = wr->next) {
-               err = rvt_post_one_wr(qp, wr);
+               err = rvt_post_one_wr(qp, wr, &call_send);
                if (unlikely(err)) {
                        *bad_wr = wr;
                        goto bail;

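The rdmavt hunks above show call_send being seeded from the queue state and then forced by the driver hint, but not how it is finally consumed. A minimal sketch of that consumption, assuming the existing rdmavt driver callbacks do_send (direct progress) and schedule_send (deferred progress) and using a hypothetical helper name, is:

/*
 * Sketch only: how the tail of rvt_post_send can act on call_send.
 * example_finish_post_send is a hypothetical helper; the real epilogue
 * is not part of the hunks shown in this commit.
 */
static void example_finish_post_send(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                                     unsigned int nreq, int call_send)
{
        if (!nreq)
                return;                          /* nothing was queued */
        if (call_send)
                rdi->driver_f.do_send(qp);       /* direct call to the progress routine */
        else
                rdi->driver_f.schedule_send(qp); /* defer to the driver's send engine */
}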

@@ -73,6 +73,7 @@ static int iowait_sleep(
        struct sdma_txreq *stx,
        unsigned seq);
 static void iowait_wakeup(struct iowait *wait, int reason);
+static void qp_pio_drain(struct rvt_qp *qp);
 
 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
@@ -272,7 +273,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
        default:
                break;
        }
-       return 0;
+       return wqe->length <= piothreshold;
 }
 
 /**