iommu/amd/io-pgtable: Implement unmap_pages io_pgtable_ops callback

Implement the io_pgtable_ops->unmap_pages() callback for the AMD driver
and deprecate the io_pgtable_ops->unmap callback.

Also, if fetch_pte() returns NULL, return from unmap_pages()
instead of trying to continue unmapping the remaining pages.
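
As a rough illustration of the new contract (not part of this patch; fake_unmap_pages(), fake_fetch_pte(), FAKE_PAGE_SIZE and the small page-table array below are made-up stand-ins): the callback now takes a page size plus a page count instead of a pre-multiplied byte size, clears one mapping per loop iteration, and returns the number of bytes actually unmapped, stopping at the first missing PTE. A userspace sketch of that behaviour:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE 4096UL

/* Toy "page table": one mapped/unmapped flag per 4K page. */
static bool mapped[16];

/* Stand-in for fetch_pte(): NULL means no PTE for this IOVA. */
static bool *fake_fetch_pte(unsigned long iova, unsigned long *unmap_size)
{
	unsigned long idx = iova / FAKE_PAGE_SIZE;

	*unmap_size = FAKE_PAGE_SIZE;
	if (idx >= 16 || !mapped[idx])
		return NULL;
	return &mapped[idx];
}

/* Mirrors the shape of the new callback: pgsize/pgcount in, bytes out. */
static unsigned long fake_unmap_pages(unsigned long iova,
				      size_t pgsize, size_t pgcount)
{
	size_t size = pgcount * pgsize;	/* the kernel uses pgcount << __ffs(pgsize) */
	unsigned long unmapped = 0;
	unsigned long unmap_size;
	bool *pte;

	while (unmapped < size) {
		pte = fake_fetch_pte(iova, &unmap_size);
		if (pte)
			*pte = false;		/* clear the mapping */
		else
			return unmapped;	/* stop at the first hole */

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

int main(void)
{
	/* Map pages 0..3, leave page 4 unmapped. */
	for (int i = 0; i < 4; i++)
		mapped[i] = true;

	/* Ask for 6 pages; only 4 can be unmapped, so 4 pages are reported. */
	unsigned long done = fake_unmap_pages(0, FAKE_PAGE_SIZE, 6);

	printf("unmapped %lu bytes (%lu pages)\n", done, done / FAKE_PAGE_SIZE);
	assert(done == 4 * FAKE_PAGE_SIZE);
	return 0;
}

The behavioural point mirrored here is the early return: a hole in the page table yields a partial byte count back to the caller rather than an attempt to keep walking past it.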

Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/20220825063939.8360-3-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author: Vasant Hegde, 2022-08-25 06:39:32 +00:00 (committed by Joerg Roedel)
parent 8cc233dec3
commit 251c4db699


@@ -435,17 +435,18 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	return ret;
 }
 
-static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
-					 unsigned long iova,
-					 size_t size,
-					 struct iommu_iotlb_gather *gather)
+static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
+					  unsigned long iova,
+					  size_t pgsize, size_t pgcount,
+					  struct iommu_iotlb_gather *gather)
 {
 	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
 	unsigned long long unmapped;
 	unsigned long unmap_size;
 	u64 *pte;
+	size_t size = pgcount << __ffs(pgsize);
 
-	BUG_ON(!is_power_of_2(size));
+	BUG_ON(!is_power_of_2(pgsize));
 
 	unmapped = 0;
 
@@ -457,14 +458,14 @@ static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
 			count = PAGE_SIZE_PTE_COUNT(unmap_size);
 			for (i = 0; i < count; i++)
 				pte[i] = 0ULL;
+		} else {
+			return unmapped;
 		}
 
 		iova = (iova & ~(unmap_size - 1)) + unmap_size;
 		unmapped += unmap_size;
 	}
 
-	BUG_ON(unmapped && !is_power_of_2(unmapped));
-
 	return unmapped;
 }
 
@@ -524,7 +525,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
 	cfg->tlb = &v1_flush_ops;
 
 	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
-	pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
+	pgtable->iop.ops.unmap_pages  = iommu_v1_unmap_pages;
 	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
 
 	return &pgtable->iop;
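
A side note on the "size_t size = pgcount << __ffs(pgsize);" line added above: because pgsize must be a power of two (enforced by the BUG_ON), __ffs(pgsize) is log2(pgsize), so the shift is simply pgcount * pgsize. A small standalone check of that equivalence, using __builtin_ctzl() as a userspace stand-in for the kernel's __ffs():

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t pgsize  = 4096;	/* must be a power of two */
	size_t pgcount = 512;

	/* __builtin_ctzl() counts trailing zero bits, i.e. log2 of a power of two. */
	size_t by_shift = pgcount << __builtin_ctzl(pgsize);
	size_t by_mul   = pgcount * pgsize;

	assert(by_shift == by_mul);
	printf("%zu bytes to unmap\n", by_shift);
	return 0;
}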