RDMA/hns: Optimize base address table config flow for qp buffer

Currently, before the qp is created, a page size needs to be calculated
for the base address table so that it can store all of the base
addresses in the mtr. As a result, the parameter configuration of the
mtr is complex. So integrate the calculation of the base address table
page size into the hem-related interfaces to simplify the process of
using the mtr.
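
At the call site this removes the page-size probing loop entirely.
Condensed from the map_wqe_buf() hunks below (not a complete function,
shown only to contrast the two flows):

	/* before: probe a BT page size large enough for every root BA */
	hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions,
							region_count);

	/* after: take the capability value as-is; hem_list_alloc_root_bt()
	 * now sizes the root table itself from the real root BA count
	 */
	hr_qp->wqe_bt_pg_shift = hr_dev->caps.mtt_ba_pg_sz;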

Link: https://lore.kernel.org/r/1583839084-31579-5-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 1133401412 (parent e363f7de4e)
Author: Xi Wang <wangxi11@huawei.com>
Date:   2020-03-10 19:18:03 +08:00
Committed by: Jason Gunthorpe

3 files changed, 21 insertions(+), 41 deletions(-)

drivers/infiniband/hw/hns/hns_roce_device.h

@@ -669,10 +669,6 @@ struct hns_roce_qp {
 	struct ib_umem		*umem;
 	struct hns_roce_mtt	mtt;
 	struct hns_roce_mtr	mtr;
-
-	/* this define must less than HNS_ROCE_MAX_BT_REGION */
-#define HNS_ROCE_WQE_REGION_MAX 3
-	struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX];
 	int			wqe_bt_pg_shift;
 	u32			buff_size;

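With the regions array gone from the long-lived QP state, the
buffer-related tail of struct hns_roce_qp reduces to the fragment below
(reconstructed from the context lines above; neighbouring fields
omitted):

	struct hns_roce_qp {
		...
		struct ib_umem		*umem;
		struct hns_roce_mtt	mtt;
		struct hns_roce_mtr	mtr;
		int			wqe_bt_pg_shift;
		u32			buff_size;
		...
	};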
drivers/infiniband/hw/hns/hns_roce_hem.c

@@ -1383,6 +1383,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
 	void *cpu_base;
 	u64 phy_base;
 	int ret = 0;
+	int ba_num;
 	int offset;
 	int total;
 	int step;
@@ -1393,12 +1394,16 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
 	if (root_hem)
 		return 0;
 
+	ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
+	if (ba_num < 1)
+		return -ENOMEM;
+
 	INIT_LIST_HEAD(&temp_root);
-	total = r->offset;
+	offset = r->offset;
 	/* indicate to last region */
 	r = &regions[region_cnt - 1];
-	root_hem = hem_list_alloc_item(hr_dev, total, r->offset + r->count - 1,
-				       unit, true, 0);
+	root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
+				       ba_num, true, 0);
 	if (!root_hem)
 		return -ENOMEM;
 	list_add(&root_hem->list, &temp_root);
@@ -1410,7 +1415,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
 		INIT_LIST_HEAD(&temp_list[i]);
 
 	total = 0;
-	for (i = 0; i < region_cnt && total < unit; i++) {
+	for (i = 0; i < region_cnt && total < ba_num; i++) {
 		r = &regions[i];
 		if (!r->count)
 			continue;
@@ -1443,7 +1448,8 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
 			/* if exist mid bt, link L1 to L0 */
 			list_for_each_entry_safe(hem, temp_hem,
 						 &hem_list->mid_bt[i][1], list) {
-				offset = hem->start / step * BA_BYTE_LEN;
+				offset = (hem->start - r->offset) / step *
+					 BA_BYTE_LEN;
 				hem_list_link_bt(hr_dev, cpu_base + offset,
 						 hem->dma_addr);
 				total++;

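hns_roce_hem_list_calc_root_ba() itself is not part of this diff. As a
rough model of its contract (a userspace sketch under assumed hopnum
semantics, not the kernel implementation): a region whose page BAs sit
directly in the root table contributes r->count entries, while a
deeper-hop region contributes one root entry per unit-sized chunk, so
the returned total is exactly the number of root-level slots the new
hem_list_alloc_item() call must provide.

	#include <stdio.h>

	/* 'struct region' is a stand-in for struct hns_roce_buf_region */
	struct region { int offset; int count; int hopnum; };

	static int calc_root_ba(const struct region *r, int n, int unit)
	{
		int i, total = 0;

		for (i = 0; i < n; i++)	/* ceil(count/unit) per deep region */
			total += r[i].hopnum > 1 ?
				 (r[i].count + unit - 1) / unit : r[i].count;

		return total;
	}

	int main(void)
	{
		/* e.g. a 3-way WQE split: two 1-hop regions, one 2-hop */
		struct region rs[] = { {0, 64, 1}, {64, 16, 1}, {80, 1000, 2} };

		printf("%d\n", calc_root_ba(rs, 3, 256)); /* 64 + 16 + 4 = 84 */
		return 0;
	}

This packing also appears to be why the last hunk rebases the offset:
(hem->start - r->offset) / step indexes an L1 table relative to its
region's first root slot rather than from absolute offset zero.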
drivers/infiniband/hw/hns/hns_roce_qp.c

@@ -579,30 +579,6 @@ static int split_wqe_buf_region(struct hns_roce_dev *hr_dev,
 	return region_cnt;
 }
 
-static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
-				  struct hns_roce_buf_region *regions,
-				  int region_cnt)
-{
-	int bt_pg_shift;
-	int ba_num;
-	int ret;
-
-	bt_pg_shift = PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz;
-
-	/* all root ba entries must in one bt page */
-	do {
-		ba_num = (1 << bt_pg_shift) / BA_BYTE_LEN;
-		ret = hns_roce_hem_list_calc_root_ba(regions, region_cnt,
-						     ba_num);
-		if (ret <= ba_num)
-			break;
-
-		bt_pg_shift++;
-	} while (ret > ba_num);
-
-	return bt_pg_shift - PAGE_SHIFT;
-}
-
 static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
 				struct hns_roce_qp *hr_qp)
 {
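
For reference, here is what the deleted helper was doing numerically: it
grew the BT page until a single page could hold every root BA. A minimal
userspace model (PAGE_SHIFT = 12 and BA_BYTE_LEN = 8 are assumptions,
i.e. 4K pages and 8-byte base address entries; root_ba stands in for the
hns_roce_hem_list_calc_root_ba() result):

	#include <stdio.h>

	#define PAGE_SHIFT  12	/* assumed: 4K kernel pages */
	#define BA_BYTE_LEN 8	/* assumed: 8-byte base address entries */

	/* grow the BT page until one page holds all root BAs, then
	 * report the extra shift over PAGE_SHIFT
	 */
	static int calc_bt_pg_shift(int root_ba, int mtt_ba_pg_sz)
	{
		int bt_pg_shift = PAGE_SHIFT + mtt_ba_pg_sz;

		while ((1 << bt_pg_shift) / BA_BYTE_LEN < root_ba)
			bt_pg_shift++;

		return bt_pg_shift - PAGE_SHIFT;
	}

	int main(void)
	{
		printf("%d\n", calc_bt_pg_shift(512, 0));	/* fits in 4K: 0 */
		printf("%d\n", calc_bt_pg_shift(513, 0));	/* needs 8K: 1 */
		return 0;
	}

The new flow drops the "all root BAs in one BT page" constraint instead
of growing the page: hem_list_alloc_root_bt() now allocates exactly
ba_num root entries, so this probing became unnecessary.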
@@ -768,7 +744,10 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
 static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 		       u32 page_shift, bool is_user)
 {
-	dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL };
+	/* WQE buffer include 3 parts: SQ, extend SGE and RQ. */
+#define HNS_ROCE_WQE_REGION_MAX 3
+	struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX] = {};
+	dma_addr_t *buf_list[HNS_ROCE_WQE_REGION_MAX] = {};
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_buf_region *r;
 	int region_count;
@@ -776,18 +755,18 @@ static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 	int ret;
 	int i;
 
-	region_count = split_wqe_buf_region(hr_dev, hr_qp, hr_qp->regions,
-					    ARRAY_SIZE(hr_qp->regions), page_shift);
+	region_count = split_wqe_buf_region(hr_dev, hr_qp, regions,
+					    ARRAY_SIZE(regions), page_shift);
 
 	/* alloc a tmp list to store WQE buffers address */
-	ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, region_count);
+	ret = hns_roce_alloc_buf_list(regions, buf_list, region_count);
 	if (ret) {
 		ibdev_err(ibdev, "Failed to alloc WQE buffer list\n");
 		return ret;
 	}
 
 	for (i = 0; i < region_count; i++) {
-		r = &hr_qp->regions[i];
+		r = &regions[i];
 		if (is_user)
 			buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i],
 				r->count, r->offset, hr_qp->umem,
@@ -805,11 +784,10 @@ static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 		}
 	}
 
-	hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions,
-							region_count);
+	hr_qp->wqe_bt_pg_shift = hr_dev->caps.mtt_ba_pg_sz;
 	hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
 			  page_shift);
-	ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, hr_qp->regions,
+	ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, regions,
 				  region_count);
 	if (ret)
 		ibdev_err(ibdev, "Failed to attach WQE's mtr\n");
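
Condensed, the caller-side sequence after this patch is just the
following (identifiers as in the hunk above; error handling and
buffer-list cleanup omitted):

	hr_qp->wqe_bt_pg_shift = hr_dev->caps.mtt_ba_pg_sz;
	hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
			  page_shift);
	ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, regions,
				  region_count);

Since regions is now a stack-local of map_wqe_buf(), the mtr presumably
copies whatever it needs during attach; nothing outside this function
depends on the region split any longer.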