IB/hfi1: Move receive work queue struct into uapi directory

The rvt_rwqe and rvt_rwq structs are shared between rdmavt and the
providers but are not in the uapi directory.  As per the comment in
https://marc.info/?l=linux-rdma&m=152296522708522&w=2, the hfi1 driver and
the rdma core driver are not using shared structures in the uapi
directory.

Move the rvt_rwqe and rvt_rwq structs into the rvt-abi.h header in the uapi directory.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Kamenee Arumugam <kamenee.arumugam@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Kamenee Arumugam 2019-06-28 14:04:24 -04:00 committed by Jason Gunthorpe
parent 239b0e52d8
commit dabac6e460
6 changed files with 212 additions and 92 deletions
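The diffs below lean on the RDMA_ATOMIC_UAPI() accessors introduced by the parent CQ commit (239b0e52d8). As a rough sketch, assuming the kernel-side macros pair acquire/release semantics with the uapi wrapper (the exact definitions live in the rdmavt headers and may differ from this approximation):

/* uapi fallback, as in rvt-abi.h below */
#define RDMA_ATOMIC_UAPI(_type, _name) struct { _type val; } _name

/* kernel-side accessors (sketch): acquire/release on the shared index */
#define RDMA_READ_UAPI_ATOMIC(member)      smp_load_acquire(&(member).val)
#define RDMA_WRITE_UAPI_ATOMIC(member, x)  smp_store_release(&(member).val, x)

Wrapping each index in a one-member struct forces every access to the user-shared head/tail to go through these helpers instead of plain loads and stores.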

drivers/infiniband/sw/rdmavt/qp.c

@@ -802,6 +802,46 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
 	}
 }
 
+/**
+ * rvt_alloc_rq - allocate memory for user or kernel buffer
+ * @rq: receive queue data structure
+ * @size: size of the WQE array, in bytes
+ * @node: the NUMA node
+ * @udata: true if a user-mappable buffer is requested, false otherwise
+ *
+ * Return: 0 on success, -ENOMEM if memory allocation failed.
+ * This function is used by both shared receive
+ * queues and non-shared receive queues to allocate
+ * memory.
+ */
+int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
+		 struct ib_udata *udata)
+{
+	if (udata) {
+		rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
+		if (!rq->wq)
+			goto bail;
+		/* need kwq with no buffers */
+		rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
+		if (!rq->kwq)
+			goto bail;
+		rq->kwq->curr_wq = rq->wq->wq;
+	} else {
+		/* need kwq with buffers */
+		rq->kwq =
+			vzalloc_node(sizeof(struct rvt_krwq) + size, node);
+		if (!rq->kwq)
+			goto bail;
+		rq->kwq->curr_wq = rq->kwq->wq;
+	}
+	spin_lock_init(&rq->lock);
+	return 0;
+bail:
+	rvt_free_rq(rq);
+	return -ENOMEM;
+}
+
 /**
  * rvt_init_qp - initialize the QP state to the reset state
  * @qp: the QP to init or reinit
@@ -852,10 +892,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 	qp->s_tail_ack_queue = 0;
 	qp->s_acked_ack_queue = 0;
 	qp->s_num_rd_atomic = 0;
-	if (qp->r_rq.wq) {
-		qp->r_rq.wq->head = 0;
-		qp->r_rq.wq->tail = 0;
-	}
 	qp->r_sge.num_sge = 0;
 	atomic_set(&qp->s_reserved_used, 0);
 }
@@ -1046,17 +1082,12 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
 		sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
 			sizeof(struct rvt_rwqe);
-		if (udata)
-			qp->r_rq.wq = vmalloc_user(
-						sizeof(struct rvt_rwq) +
-						qp->r_rq.size * sz);
-		else
-			qp->r_rq.wq = vzalloc_node(
-						sizeof(struct rvt_rwq) +
-						qp->r_rq.size * sz,
-						rdi->dparms.node);
-		if (!qp->r_rq.wq)
+		err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
+				   rdi->dparms.node, udata);
+		if (err) {
+			ret = ERR_PTR(err);
 			goto bail_driver_priv;
+		}
 	}
 
 	/*
@@ -1202,8 +1233,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
 bail_rq_wq:
-	if (!qp->ip)
-		vfree(qp->r_rq.wq);
+	rvt_free_rq(&qp->r_rq);
 
 bail_driver_priv:
 	rdi->driver_f.qp_priv_free(rdi, qp);
@@ -1269,19 +1299,26 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
 	}
 
 	wc.status = IB_WC_WR_FLUSH_ERR;
 
-	if (qp->r_rq.wq) {
-		struct rvt_rwq *wq;
+	if (qp->r_rq.kwq) {
 		u32 head;
 		u32 tail;
+		struct rvt_rwq *wq = NULL;
+		struct rvt_krwq *kwq = NULL;
 
 		spin_lock(&qp->r_rq.lock);
+		/* qp->ip is used to validate if there is a user buffer mmapped */
+		if (qp->ip) {
+			wq = qp->r_rq.wq;
+			head = RDMA_READ_UAPI_ATOMIC(wq->head);
+			tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
+		} else {
+			kwq = qp->r_rq.kwq;
+			head = kwq->head;
+			tail = kwq->tail;
+		}
 		/* sanity check pointers before trusting them */
-		wq = qp->r_rq.wq;
-		head = wq->head;
 		if (head >= qp->r_rq.size)
 			head = 0;
-		tail = wq->tail;
 		if (tail >= qp->r_rq.size)
 			tail = 0;
 		while (tail != head) {
@@ -1290,8 +1327,10 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
 				tail = 0;
 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
 		}
-		wq->tail = tail;
+		if (qp->ip)
+			RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
+		else
+			kwq->tail = tail;
 		spin_unlock(&qp->r_rq.lock);
 	} else if (qp->ibqp.event_handler) {
 		ret = 1;
@@ -1634,8 +1673,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	if (qp->ip)
 		kref_put(&qp->ip->ref, rvt_release_mmap_info);
-	else
-		vfree(qp->r_rq.wq);
+	kvfree(qp->r_rq.kwq);
 	rdi->driver_f.qp_priv_free(rdi, qp);
 	kfree(qp->s_ack_queue);
 	rdma_destroy_ah_attr(&qp->remote_ah_attr);
@@ -1721,7 +1759,7 @@ int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 		  const struct ib_recv_wr **bad_wr)
 {
 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
-	struct rvt_rwq *wq = qp->r_rq.wq;
+	struct rvt_krwq *wq = qp->r_rq.kwq;
 	unsigned long flags;
 	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
 				!qp->ibqp.srq;
@@ -1746,7 +1784,7 @@ int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 		next = wq->head + 1;
 		if (next >= qp->r_rq.size)
 			next = 0;
-		if (next == wq->tail) {
+		if (next == READ_ONCE(wq->tail)) {
 			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 			*bad_wr = wr;
 			return -ENOMEM;
@@ -1770,8 +1808,7 @@ int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 			 * Make sure queue entry is written
 			 * before the head index.
 			 */
-			smp_wmb();
-			wq->head = next;
+			smp_store_release(&wq->head, next);
 		}
 		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 	}
@@ -2141,7 +2178,7 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 		      const struct ib_recv_wr **bad_wr)
 {
 	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
-	struct rvt_rwq *wq;
+	struct rvt_krwq *wq;
 	unsigned long flags;
 
 	for (; wr; wr = wr->next) {
@@ -2155,11 +2192,11 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 		}
 
 		spin_lock_irqsave(&srq->rq.lock, flags);
-		wq = srq->rq.wq;
+		wq = srq->rq.kwq;
 		next = wq->head + 1;
 		if (next >= srq->rq.size)
 			next = 0;
-		if (next == wq->tail) {
+		if (next == READ_ONCE(wq->tail)) {
 			spin_unlock_irqrestore(&srq->rq.lock, flags);
 			*bad_wr = wr;
 			return -ENOMEM;
@@ -2171,8 +2208,7 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 		for (i = 0; i < wr->num_sge; i++)
 			wqe->sg_list[i] = wr->sg_list[i];
 		/* Make sure queue entry is written before the head index. */
-		smp_wmb();
-		wq->head = next;
+		smp_store_release(&wq->head, next);
 		spin_unlock_irqrestore(&srq->rq.lock, flags);
 	}
 	return 0;
@@ -2229,6 +2265,25 @@ static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
 	return 0;
 }
 
+/**
+ * get_rvt_head - get the head index of the circular buffer
+ * @rq: the receive queue data structure
+ * @ip: the QP's mmap info pointer; non-NULL for user receive queues
+ *
+ * Return: head index value
+ */
+static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
+{
+	u32 head;
+
+	if (ip)
+		head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
+	else
+		head = rq->kwq->head;
+
+	return head;
+}
+
 /**
  * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
  * @qp: the QP
@@ -2243,21 +2298,26 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
 {
 	unsigned long flags;
 	struct rvt_rq *rq;
+	struct rvt_krwq *kwq;
 	struct rvt_rwq *wq;
 	struct rvt_srq *srq;
 	struct rvt_rwqe *wqe;
 	void (*handler)(struct ib_event *, void *);
 	u32 tail;
+	u32 head;
 	int ret;
+	void *ip = NULL;
 
 	if (qp->ibqp.srq) {
 		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
 		handler = srq->ibsrq.event_handler;
 		rq = &srq->rq;
+		ip = srq->ip;
 	} else {
 		srq = NULL;
 		handler = NULL;
 		rq = &qp->r_rq;
+		ip = qp->ip;
 	}
 
 	spin_lock_irqsave(&rq->lock, flags);
@@ -2265,17 +2325,24 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
 		ret = 0;
 		goto unlock;
 	}
+	if (ip) {
+		wq = rq->wq;
+		tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
+	} else {
+		kwq = rq->kwq;
+		tail = kwq->tail;
+	}
 
-	wq = rq->wq;
-	tail = wq->tail;
 	/* Validate tail before using it since it is user writable. */
 	if (tail >= rq->size)
 		tail = 0;
-	if (unlikely(tail == wq->head)) {
+
+	head = get_rvt_head(rq, ip);
+	if (unlikely(tail == head)) {
 		ret = 0;
 		goto unlock;
 	}
-	/* Make sure entry is read after head index is read. */
+	/* Make sure entry is read after the count is read. */
 	smp_rmb();
 	wqe = rvt_get_rwqe_ptr(rq, tail);
 	/*
@@ -2285,7 +2352,10 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
 	 */
 	if (++tail >= rq->size)
 		tail = 0;
-	wq->tail = tail;
+	if (ip)
+		RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
+	else
+		kwq->tail = tail;
 	if (!wr_id_only && !init_sge(qp, wqe)) {
 		ret = -1;
 		goto unlock;
@@ -2301,7 +2371,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
 		 * Validate head pointer value and compute
 		 * the number of remaining WQEs.
 		 */
-		n = wq->head;
+		n = get_rvt_head(rq, ip);
 		if (n >= rq->size)
 			n = 0;
 		if (n < tail)
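Throughout qp.c, the discriminator between the two queue flavors is the mmap info pointer (qp->ip or srq->ip): non-NULL means the indices live in the user-mapped struct rvt_rwq, NULL means they live in the kernel-only struct rvt_krwq. A hypothetical tail-side twin of get_rvt_head() above (the diff open-codes this at each site) would be:

/* Hypothetical helper, not part of this patch. */
static inline u32 example_get_rvt_tail(struct rvt_rq *rq, void *ip)
{
	return ip ? RDMA_READ_UAPI_ATOMIC(rq->wq->tail) : rq->kwq->tail;
}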

drivers/infiniband/sw/rdmavt/qp.h

@@ -68,4 +68,6 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 		      const struct ib_recv_wr **bad_wr);
 int rvt_wss_init(struct rvt_dev_info *rdi);
 void rvt_wss_exit(struct rvt_dev_info *rdi);
+int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
+		 struct ib_udata *udata);
 #endif          /* DEF_RVTQP_H */
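Note that the size argument to rvt_alloc_rq() is in bytes, not entries: each caller in this commit passes entries * per-WQE size, where the per-WQE size folds in the max_sge scatter/gather slots. A minimal hypothetical caller, mirroring rvt_create_qp() above:

/* Hypothetical wrapper; rq->size and rq->max_sge follow the callers above. */
static int example_setup_rq(struct rvt_rq *rq, u32 entries, u8 max_sge,
			    int node, struct ib_udata *udata)
{
	u32 sz = sizeof(struct rvt_rwqe) + max_sge * sizeof(struct ib_sge);

	rq->size = entries;
	rq->max_sge = max_sge;
	return rvt_alloc_rq(rq, entries * sz, node, udata);
}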

drivers/infiniband/sw/rdmavt/rc.c

@@ -104,15 +104,19 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
 	} else {
 		u32 min, max, x;
 		u32 credits;
-		struct rvt_rwq *wq = qp->r_rq.wq;
 		u32 head;
 		u32 tail;
 
 		/* sanity check pointers before trusting them */
-		head = wq->head;
+		if (qp->ip) {
+			head = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->head);
+			tail = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->tail);
+		} else {
+			head = READ_ONCE(qp->r_rq.kwq->head);
+			tail = READ_ONCE(qp->r_rq.kwq->tail);
+		}
 		if (head >= qp->r_rq.size)
 			head = 0;
-		tail = wq->tail;
 		if (tail >= qp->r_rq.size)
 			tail = 0;
 		/*
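rvt_compute_aeth() samples head and tail without holding r_rq.lock, so the kernel-queue branch reads them with READ_ONCE() while the user-queue branch relies on the acquire semantics of RDMA_READ_UAPI_ATOMIC(); both values are range-checked afterwards because a stale or user-written index cannot be trusted. A simplified sketch of the credit computation (hypothetical, condensed from the hunk above, range checks omitted):

/* Lock-free occupancy sample of a kernel RQ. */
static u32 example_rq_credits(struct rvt_qp *qp)
{
	u32 head = READ_ONCE(qp->r_rq.kwq->head);
	u32 tail = READ_ONCE(qp->r_rq.kwq->tail);

	return (head >= tail) ? head - tail
			      : qp->r_rq.size - (tail - head);
}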

drivers/infiniband/sw/rdmavt/srq.c

@@ -52,7 +52,7 @@
 
 #include "srq.h"
 #include "vt.h"
-
+#include "qp.h"
 /**
  * rvt_driver_srq_init - init srq resources on a per driver basis
  * @rdi: rvt dev structure
@@ -97,11 +97,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
 	srq->rq.max_sge = srq_init_attr->attr.max_sge;
 	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
 		sizeof(struct rvt_rwqe);
-	srq->rq.wq = udata ?
-		vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz) :
-		vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
-			     dev->dparms.node);
-	if (!srq->rq.wq) {
+	if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
+			 dev->dparms.node, udata)) {
 		ret = -ENOMEM;
 		goto bail_srq;
 	}
@@ -152,7 +149,7 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
 bail_ip:
 	kfree(srq->ip);
 bail_wq:
-	vfree(srq->rq.wq);
+	rvt_free_rq(&srq->rq);
 bail_srq:
 	return ret;
 }
@@ -172,11 +169,12 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 {
 	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
 	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
-	struct rvt_rwq *wq;
+	struct rvt_rq tmp_rq = {};
 	int ret = 0;
 
 	if (attr_mask & IB_SRQ_MAX_WR) {
-		struct rvt_rwq *owq;
+		struct rvt_krwq *okwq = NULL;
+		struct rvt_rwq *owq = NULL;
 		struct rvt_rwqe *p;
 		u32 sz, size, n, head, tail;
@@ -185,17 +183,12 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		    ((attr_mask & IB_SRQ_LIMIT) ?
 		     attr->srq_limit : srq->limit) > attr->max_wr)
 			return -EINVAL;
-
 		sz = sizeof(struct rvt_rwqe) +
 			srq->rq.max_sge * sizeof(struct ib_sge);
 		size = attr->max_wr + 1;
-		wq = udata ?
-			vmalloc_user(sizeof(struct rvt_rwq) + size * sz) :
-			vzalloc_node(sizeof(struct rvt_rwq) + size * sz,
-				     dev->dparms.node);
-		if (!wq)
+		if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
+				 udata))
 			return -ENOMEM;
-
 		/* Check that we can write the offset to mmap. */
 		if (udata && udata->inlen >= sizeof(__u64)) {
 			__u64 offset_addr;
@@ -218,9 +211,15 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		 * validate head and tail pointer values and compute
 		 * the number of remaining WQEs.
 		 */
-		owq = srq->rq.wq;
-		head = owq->head;
-		tail = owq->tail;
+		if (udata) {
+			owq = srq->rq.wq;
+			head = RDMA_READ_UAPI_ATOMIC(owq->head);
+			tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
+		} else {
+			okwq = srq->rq.kwq;
+			head = okwq->head;
+			tail = okwq->tail;
+		}
 		if (head >= srq->rq.size || tail >= srq->rq.size) {
 			ret = -EINVAL;
 			goto bail_unlock;
@@ -235,7 +234,7 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			goto bail_unlock;
 		}
 		n = 0;
-		p = wq->wq;
+		p = tmp_rq.kwq->curr_wq;
 		while (tail != head) {
 			struct rvt_rwqe *wqe;
 			int i;
@@ -250,22 +249,29 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			if (++tail >= srq->rq.size)
 				tail = 0;
 		}
-		srq->rq.wq = wq;
+		srq->rq.kwq = tmp_rq.kwq;
+		if (udata) {
+			srq->rq.wq = tmp_rq.wq;
+			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
+			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
+		} else {
+			tmp_rq.kwq->head = n;
+			tmp_rq.kwq->tail = 0;
+		}
 		srq->rq.size = size;
-		wq->head = n;
-		wq->tail = 0;
 		if (attr_mask & IB_SRQ_LIMIT)
 			srq->limit = attr->srq_limit;
 		spin_unlock_irq(&srq->rq.lock);
 
 		vfree(owq);
+		kvfree(okwq);
 
 		if (srq->ip) {
 			struct rvt_mmap_info *ip = srq->ip;
 			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
 			u32 s = sizeof(struct rvt_rwq) + size * sz;
 
-			rvt_update_mmap_info(dev, ip, s, wq);
+			rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);
 
 			/*
 			 * Return the offset to mmap.
@@ -301,7 +307,7 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 bail_unlock:
 	spin_unlock_irq(&srq->rq.lock);
 bail_free:
-	vfree(wq);
+	rvt_free_rq(&tmp_rq);
 	return ret;
 }
@@ -336,6 +342,5 @@ void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 	spin_unlock(&dev->n_srqs_lock);
 	if (srq->ip)
 		kref_put(&srq->ip->ref, rvt_release_mmap_info);
-	else
-		vfree(srq->rq.wq);
+	kvfree(srq->rq.kwq);
 }
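The SRQ resize path now stages the new queue in a scratch struct rvt_rq (tmp_rq), copies the live WQEs across under the queue lock, and only then publishes tmp_rq.kwq (plus tmp_rq.wq for user SRQs) into srq->rq; on failure, rvt_free_rq(&tmp_rq) disposes of whichever buffer was allocated. In outline (hypothetical, heavily condensed from rvt_modify_srq() above):

/* Hypothetical outline; validation, WQE copy and mmap handling omitted. */
static int example_resize_srq(struct rvt_srq *srq, u32 size, u32 sz,
			      struct rvt_dev_info *dev, struct ib_udata *udata)
{
	struct rvt_rq tmp_rq = {};

	if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node, udata))
		return -ENOMEM;
	spin_lock_irq(&srq->rq.lock);
	/* ... validate old head/tail, copy WQEs into tmp_rq via curr_wq ... */
	srq->rq.kwq = tmp_rq.kwq;	/* publish; old buffers freed after unlock */
	spin_unlock_irq(&srq->rq.lock);
	return 0;
}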

include/rdma/rdmavt_qp.h

@@ -52,6 +52,7 @@
 #include <rdma/ib_pack.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdmavt_cq.h>
+#include <rdma/rvt-abi.h>
 /*
  * Atomic bit definitions for r_aflags.
  */
@@ -177,33 +178,27 @@ struct rvt_swqe {
 	struct rvt_sge sg_list[0];
 };
 
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
- */
-struct rvt_rwqe {
-	u64 wr_id;
-	u8 num_sge;
-	struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct rvt_rwq {
+/**
+ * struct rvt_krwq - kernel receive work request queue
+ * @head: index of next entry to fill
+ * @tail: index of next entry to pull
+ * @curr_wq: pointer to the receive queue entries in use
+ * @wq: receive work queue entries for kernel mode users
+ *
+ * This structure is used to contain the head pointer,
+ * tail pointer and receive work queue entries for kernel
+ * mode users.
+ */
+struct rvt_krwq {
 	u32 head;               /* new work requests posted to the head */
 	u32 tail;               /* receives pull requests from here. */
-	struct rvt_rwqe wq[0];
+	struct rvt_rwqe *curr_wq;
+	struct rvt_rwqe wq[];
 };
 
 struct rvt_rq {
 	struct rvt_rwq *wq;
+	struct rvt_krwq *kwq;
 	u32 size;               /* size of RWQE array */
 	u8 max_sge;
 	/* protect changes in this struct */
@@ -472,7 +467,7 @@ static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
 static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
 {
 	return (struct rvt_rwqe *)
-		((char *)rq->wq->wq +
+		((char *)rq->kwq->curr_wq +
 		 (sizeof(struct rvt_rwqe) +
 		  rq->max_sge * sizeof(struct ib_sge)) * n);
 }
@@ -852,6 +847,21 @@ static inline u32 ib_cq_head(struct ib_cq *send_cq)
 	       ibcq_to_rvtcq(send_cq)->kqueue->head;
 }
 
+/**
+ * rvt_free_rq - free memory allocated for rvt_rq struct
+ * @rq: request queue data structure
+ *
+ * This function should only be called if the rvt_mmap_info()
+ * has not succeeded.
+ */
+static inline void rvt_free_rq(struct rvt_rq *rq)
+{
+	kvfree(rq->kwq);
+	rq->kwq = NULL;
+	vfree(rq->wq);
+	rq->wq = NULL;
+}
+
 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
 				     u64 v,
 				     void (*cb)(struct rvt_qp *qp, u64 v));

include/uapi/rdma/rvt-abi.h

@@ -10,6 +10,7 @@
 
 #include <linux/types.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
 #ifndef RDMA_ATOMIC_UAPI
 #define RDMA_ATOMIC_UAPI(_type, _name) struct{ _type val; } _name
 #endif
@@ -29,4 +30,32 @@ struct rvt_cq_wc {
 	struct ib_uverbs_wc uqueue[];
 };
 
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
+ */
+struct rvt_rwqe {
+	__u64 wr_id;
+	__u8 num_sge;
+	__u8 padding[7];
+	struct ib_sge sg_list[];
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() for user space and rvt_get_rwqe_ptr()
+ * for kernel space.
+ */
+struct rvt_rwq {
+	/* new work requests posted to the head */
+	RDMA_ATOMIC_UAPI(__u32, head);
+	/* receives pull requests from here. */
+	RDMA_ATOMIC_UAPI(__u32, tail);
+	struct rvt_rwqe wq[];
+};
+
 #endif /* RVT_ABI_USER_H */
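Because the uapi fallback defines RDMA_ATOMIC_UAPI() as a bare one-member struct, a user-space provider that mmap()s this queue has to supply its own acquire/release accessors. A hypothetical C11-style pair (illustrative only, not taken from this patch or from rdma-core):

#include <stdint.h>

/* layout-compatible with RDMA_ATOMIC_UAPI(__u32, name) */
struct rvt_uapi_idx { uint32_t val; };

static inline uint32_t uapi_idx_read(const struct rvt_uapi_idx *i)
{
	return __atomic_load_n(&i->val, __ATOMIC_ACQUIRE);
}

static inline void uapi_idx_write(struct rvt_uapi_idx *i, uint32_t v)
{
	__atomic_store_n(&i->val, v, __ATOMIC_RELEASE);
}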