RDMA/vmw_pvrdma: Don't hardcode QP header page

Moved the hardcoded QP header page count into the PVRDMA_QP_NUM_HEADER_PAGES macro.

Reported-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Adit Ranadive <aditr@vmware.com>
Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
Tested-by: Andrew Boyer <andrew.boyer@dell.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Authored by Adit Ranadive on 2017-02-22 17:22:57 -08:00; committed by Doug Ledford
parent 6332dee83d
commit e51c2fb033
2 changed files with 6 additions and 4 deletions


@@ -70,6 +70,7 @@
 
 #define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820
 
 #define PVRDMA_NUM_RING_PAGES 4
+#define PVRDMA_QP_NUM_HEADER_PAGES 1
 
 struct pvrdma_dev;

@@ -170,8 +170,9 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
 					     sizeof(struct pvrdma_sge) *
 					     qp->sq.max_sg);
 	/* Note: one extra page for the header. */
-	qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size +
-			       PAGE_SIZE - 1) / PAGE_SIZE;
+	qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
+			  (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
+			  PAGE_SIZE;
 
 	return 0;
 }
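
As a rough illustration of the send-queue sizing above, here is a standalone userspace sketch of the same arithmetic; the wqe_cnt, wqe_size and PAGE_SIZE values are hypothetical, not taken from the driver:

#include <stdio.h>

#define PAGE_SIZE                  4096 /* assumed 4 KiB pages */
#define PVRDMA_QP_NUM_HEADER_PAGES 1

int main(void)
{
	unsigned int wqe_cnt  = 64;   /* hypothetical send queue depth */
	unsigned int wqe_size = 128;  /* hypothetical WQE size in bytes */

	/* Header page(s) plus the WQE area rounded up to whole pages. */
	unsigned int npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
				   (wqe_cnt * wqe_size + PAGE_SIZE - 1) /
				   PAGE_SIZE;

	printf("npages_send = %u\n", npages_send); /* 1 header + 2 data = 3 */
	return 0;
}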
@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 		qp->npages = qp->npages_send + qp->npages_recv;
 
 		/* Skip header page. */
-		qp->sq.offset = PAGE_SIZE;
+		qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
 
 		/* Recv queue pages are after send pages. */
 		qp->rq.offset = qp->npages_send * PAGE_SIZE;
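
Continuing the same hypothetical numbers, the per-queue offsets fall out as below; again a standalone sketch, not the driver code:

#include <stdio.h>

#define PAGE_SIZE                  4096 /* assumed 4 KiB pages */
#define PVRDMA_QP_NUM_HEADER_PAGES 1

int main(void)
{
	unsigned long npages_send = 3; /* hypothetical: 1 header + 2 WQE pages */

	/* Send WQEs start right after the header page(s) ... */
	unsigned long sq_offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
	/* ... and the receive queue starts after all send pages. */
	unsigned long rq_offset = npages_send * PAGE_SIZE;

	printf("sq.offset = %lu, rq.offset = %lu\n", sq_offset, rq_offset);
	return 0;
}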
@@ -341,7 +342,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 	cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
 	cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
 	cmd->total_chunks = qp->npages;
-	cmd->send_chunks = qp->npages_send - 1;
+	cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
 	cmd->pdir_dma = qp->pdir.dir_dma;
 
 	dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
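
For the create-QP command, one last standalone sketch with the same hypothetical page counts shows why the macro is subtracted: the header page is not counted among the send chunks reported to the device:

#include <stdio.h>

#define PVRDMA_QP_NUM_HEADER_PAGES 1

int main(void)
{
	unsigned int npages_send = 3; /* hypothetical: 1 header + 2 WQE pages */
	unsigned int npages_recv = 2; /* hypothetical receive queue pages */

	unsigned int total_chunks = npages_send + npages_recv;
	/* The header page is excluded from the send chunk count. */
	unsigned int send_chunks = npages_send - PVRDMA_QP_NUM_HEADER_PAGES;

	printf("total_chunks = %u, send_chunks = %u\n",
	       total_chunks, send_chunks);
	return 0;
}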