RDMA/umem: Add rdma_umem_for_each_dma_block()

This helper does the same as rdma_for_each_block(), except it works on a
umem. This simplifies most of the call sites.
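
For example, a typical call site changes from the explicit scatterlist form to the
umem-aware form (generic sketch; the loop body varies per driver):

	/* before: caller must pass the sg list and nmap explicitly */
	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
		addr = rdma_block_iter_dma_address(&biter);

	/* after: the helper derives those arguments from the umem */
	rdma_umem_for_each_dma_block(umem, &biter, page_size)
		addr = rdma_block_iter_dma_address(&biter);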

Link: https://lore.kernel.org/r/4-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
diff --git a/.clang-format b/.clang-format

@@ -415,6 +415,7 @@ ForEachMacros:
   - 'rbtree_postorder_for_each_entry_safe'
   - 'rdma_for_each_block'
   - 'rdma_for_each_port'
+  - 'rdma_umem_for_each_dma_block'
   - 'resource_list_for_each_entry'
   - 'resource_list_for_each_entry_safe'
   - 'rhl_for_each_entry_rcu'

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c

@@ -3787,7 +3787,7 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
 	u64 page_size = BIT_ULL(page_shift);
 	struct ib_block_iter biter;
 
-	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
+	rdma_umem_for_each_dma_block(umem, &biter, page_size)
 		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);
 
 	return pbl_tbl - pbl_tbl_orig;

diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c

@@ -1144,8 +1144,7 @@ static int umem_to_page_list(struct efa_dev *dev,
 	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
 		  hp_cnt, pages_in_hp);
 
-	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
-			    BIT(hp_shift))
+	rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
 		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
 
 	return 0;

diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c

@@ -268,8 +268,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
 	}
 
 	/* convert system page cnt to hw page cnt */
-	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
-			    1 << page_shift) {
+	rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
 		addr = rdma_block_iter_dma_address(&biter);
 		if (idx >= start) {
 			bufs[total++] = addr;

diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c

@@ -1322,8 +1322,7 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 	if (iwmr->type == IW_MEMREG_TYPE_QP)
 		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
 
-	rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
-			    iwmr->page_size) {
+	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
 		*pbl = rdma_block_iter_dma_address(&biter);
 		pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
 	}

diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h

@@ -40,6 +40,26 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
 	       PAGE_SHIFT;
 }
 
+static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+						struct ib_umem *umem,
+						unsigned long pgsz)
+{
+	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
+}
+
+/**
+ * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
+ * @umem: umem to iterate over
+ * @pgsz: Page size to split the list into
+ *
+ * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
+ * returned DMA blocks will be aligned to pgsz and span the range:
+ * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
+ */
+#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
+	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
+	     __rdma_block_iter_next(biter);)
+
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
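
A minimal usage sketch of the new macro (hypothetical driver helper, not part of
this commit; fill_dma_blocks() and page_list are illustrative names):

	/* Record the DMA address of every pgsz-aligned block of a umem.
	 * pgsz must be <= PAGE_SIZE or come from ib_umem_find_best_pgsz().
	 */
	static unsigned int fill_dma_blocks(struct ib_umem *umem, u64 *page_list,
					    unsigned long pgsz)
	{
		struct ib_block_iter biter;
		unsigned int i = 0;

		rdma_umem_for_each_dma_block(umem, &biter, pgsz)
			page_list[i++] = rdma_block_iter_dma_address(&biter);

		return i;	/* number of blocks written */
	}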