IOMMU Fixes for Linux v6.2-rc3

Including:
 
 	- Core: Fix an iommu-group refcount leak
 
 	- Fix overflow issue in IOVA alloc path
 
 	- ARM-SMMU fixes from Will:
 
 	  - Fix VFIO regression on NXP SoCs by reporting IOMMU_CAP_CACHE_COHERENCY
 
 	  - Fix SMMU shutdown paths to avoid device unregistration race
 
 	- Error handling fix for Mediatek IOMMU driver
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAmPC1nsACgkQK/BELZcB
 GuN+aw//c4rOO4buNaG/T00HfSdSGq1VwG1aIicslC82IDnh28R4A0iGoGtlmXJo
 +2qC2VPQaH7SpU7WEjhwIjuXUyuYQF5gvrZFnrumHGRSYI7IYze793TAbsGA9bLV
 Wn20rygyLlptu+wnGYHIG9PkB041ysjqJtQpRvT5AvUYW3Z9BoNDWs5YwJ9Qfm+W
 pm781ctgURPSmNK+wKKkRh5CCteWRxhKh8FKMvQ9o6lAoJNB/dcPpyE2oJ+lMojm
 kKhONbvQe3DdRm/zNY3gV1chTDPNeyIHhDGc6/NA1oAjuETlhzOG3JIrroijzsnA
 dZOJSJ6/jzqA6ZBh5hhuyUSbB0rRAN2URnrO2eFfJaVw7GJH60pdA7asxu37gNuF
 umbtsdzBZW0xba3qL7tvASZnKZCVeEsR4D6Apb36eaR7h6U7X1kKXOAK1PqHVS7+
 LjT7RCMBx+UbKSpvT2ETMlLHpSDNA81X9yzssA4H7Cyk17NguB/L9Hd/I6uzbb26
 ZHI/mZRJ0d4DvXCzmQK4760A2TSfAPA9UiseevDNQHgb8ZYw0hWqJbTaKsF5UfUe
 MEi0Jd0djQek++oqX7lQar04hbWC6BJ/aY12eATNVVVcuPqJPluysczcQdat342s
 fZzNx5ghRsSLxCVyuu1pigmoikSCI/SdoPnO69Dw/86bWvS+Djs=
 =THhI
 -----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-v6.2-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Core: Fix an iommu-group refcount leak

 - Fix overflow issue in IOVA alloc path

 - ARM-SMMU fixes from Will:
    - Fix VFIO regression on NXP SoCs by reporting IOMMU_CAP_CACHE_COHERENCY
    - Fix SMMU shutdown paths to avoid device unregistration race

 - Error handling fix for Mediatek IOMMU driver

* tag 'iommu-fixes-v6.2-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe()
  iommu/iova: Fix alloc iova overflows issue
  iommu: Fix refcount leak in iommu_device_claim_dma_owner
  iommu/arm-smmu-v3: Don't unregister on shutdown
  iommu/arm-smmu: Don't unregister on shutdown
  iommu/arm-smmu: Report IOMMU_CAP_CACHE_COHERENCY even betterer

commit 7c69844052 (Linus Torvalds, 2023-01-14 10:48:15 -06:00)
5 changed files with 35 additions and 17 deletions

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3858,7 +3858,9 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 
 static void arm_smmu_device_shutdown(struct platform_device *pdev)
 {
-	arm_smmu_device_remove(pdev);
+	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+	arm_smmu_device_disable(smmu);
 }
 
 static const struct of_device_id arm_smmu_of_match[] = {
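
The change above makes the SMMU-v3 ->shutdown hook quiesce the hardware instead of running the full remove path, so the IOMMU stays registered while other devices' shutdown hooks may still be running. As a rough sketch of that remove/shutdown split in a generic platform driver (hypothetical foo_* names, not the actual SMMU code):

/*
 * Illustrative sketch only, not the SMMU driver: a platform driver whose
 * ->shutdown hook merely quiesces the hardware, while ->remove additionally
 * tears down software state and then reuses the same quiesce path.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	bool hw_enabled;
};

static void foo_hw_disable(struct foo_priv *priv)
{
	/* Stop DMA/translation; real hardware pokes would go here. */
	priv->hw_enabled = false;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	priv->hw_enabled = true;
	platform_set_drvdata(pdev, priv);
	return 0;
}

static void foo_shutdown(struct platform_device *pdev)
{
	/* Reboot/kexec path: only quiesce, keep everything registered. */
	foo_hw_disable(platform_get_drvdata(pdev));
}

static int foo_remove(struct platform_device *pdev)
{
	/* Unbind path: unregister interfaces here, then quiesce the hardware. */
	foo_shutdown(pdev);
	return 0;
}

static struct platform_driver foo_driver = {
	.driver		= { .name = "foo-sketch" },
	.probe		= foo_probe,
	.remove		= foo_remove,
	.shutdown	= foo_shutdown,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");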

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1316,8 +1316,14 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
 
 	switch (cap) {
 	case IOMMU_CAP_CACHE_COHERENCY:
-		/* Assume that a coherent TCU implies coherent TBUs */
-		return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
+		/*
+		 * It's overwhelmingly the case in practice that when the pagetable
+		 * walk interface is connected to a coherent interconnect, all the
+		 * translation interfaces are too. Furthermore if the device is
+		 * natively coherent, then its translation interface must also be.
+		 */
+		return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
+			device_get_dma_attr(dev) == DEV_DMA_COHERENT;
 	case IOMMU_CAP_NOEXEC:
 		return true;
 	default:
@@ -2185,19 +2191,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int arm_smmu_device_remove(struct platform_device *pdev)
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
 {
 	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
 	if (!smmu)
-		return -ENODEV;
+		return;
 
 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
 		dev_notice(&pdev->dev, "disabling translation\n");
 
-	iommu_device_unregister(&smmu->iommu);
-	iommu_device_sysfs_remove(&smmu->iommu);
-
 	arm_smmu_rpm_get(smmu);
 	/* Turn the thing off */
 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
@@ -2209,12 +2212,21 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 		clk_bulk_disable(smmu->num_clks, smmu->clks);
 
 	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
-	return 0;
 }
 
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
 {
-	arm_smmu_device_remove(pdev);
+	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+	if (!smmu)
+		return -ENODEV;
+
+	iommu_device_unregister(&smmu->iommu);
+	iommu_device_sysfs_remove(&smmu->iommu);
+
+	arm_smmu_device_shutdown(pdev);
+
+	return 0;
 }
 
 static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
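
The IOMMU_CAP_CACHE_COHERENCY hunk above is what VFIO's coherency check ends up consulting on the NXP SoCs mentioned in the summary. A minimal sketch of how a consumer can query that capability through the generic IOMMU API, assuming only device_iommu_capable() from <linux/iommu.h> (the foo_* wrapper is hypothetical, not VFIO's actual code path):

/*
 * Hypothetical consumer-side check: ask the IOMMU layer whether DMA from
 * this device is cache-coherent. Behind an Arm SMMU this ends up in the
 * arm_smmu_capable() implementation shown above.
 */
#include <linux/device.h>
#include <linux/iommu.h>

static bool foo_dev_dma_is_coherent(struct device *dev)
{
	return device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY);
}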

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3185,14 +3185,16 @@ EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
  */
 int iommu_device_claim_dma_owner(struct device *dev, void *owner)
 {
-	struct iommu_group *group = iommu_group_get(dev);
+	struct iommu_group *group;
 	int ret = 0;
 
-	if (!group)
-		return -ENODEV;
 	if (WARN_ON(!owner))
 		return -EINVAL;
 
+	group = iommu_group_get(dev);
+	if (!group)
+		return -ENODEV;
+
 	mutex_lock(&group->mutex);
 	if (group->owner_cnt) {
 		if (group->owner != owner) {
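
The leak fixed above came from the ordering: iommu_group_get() takes a reference, and the early return on WARN_ON(!owner) dropped out without a matching iommu_group_put(). Checking the argument before taking the reference removes that path. As a standalone sketch of the balanced get/put pattern (a hypothetical foo_claim(), not the fixed function itself):

/*
 * Illustrative only: iommu_group_get() takes a reference that every exit
 * path must drop with iommu_group_put(). Validating arguments before
 * taking the reference keeps the early returns free of cleanup.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static int foo_claim(struct device *dev, void *owner)
{
	struct iommu_group *group;

	if (!owner)			/* check parameters first... */
		return -EINVAL;

	group = iommu_group_get(dev);	/* ...then take the reference */
	if (!group)
		return -ENODEV;

	/* ... do the claim work under the group here ... */

	iommu_group_put(group);		/* every successful get is paired with a put */
	return 0;
}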

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -197,7 +197,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = to_iova(curr);
-	retry_pfn = curr_iova->pfn_hi + 1;
+	retry_pfn = curr_iova->pfn_hi;
 
 retry:
 	do {
@@ -211,7 +211,7 @@ retry:
 		if (high_pfn < size || new_pfn < low_pfn) {
 			if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
 				high_pfn = limit_pfn;
-				low_pfn = retry_pfn;
+				low_pfn = retry_pfn + 1;
 				curr = iova_find_limit(iovad, limit_pfn);
 				curr_iova = to_iova(curr);
 				goto retry;
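
The IOVA change moves the "+ 1" so it can no longer wrap: when the cached node is the anchor (pfn_hi == ~0UL), the old "retry_pfn = pfn_hi + 1" overflowed to 0 and the later "retry_pfn < limit_pfn" test could trigger a bogus retry; after the fix the increment only happens inside that test, where retry_pfn is known to be below limit_pfn. A tiny standalone userspace demonstration of the wraparound, not kernel code:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long pfn_hi = ULONG_MAX;	/* the anchor node's pfn_hi */
	unsigned long limit_pfn = ULONG_MAX;
	unsigned long retry_pfn;

	/* Old scheme: increment up front, then compare. */
	retry_pfn = pfn_hi + 1;			/* wraps to 0 */
	printf("old: retry_pfn=%lu, retry? %d\n", retry_pfn, retry_pfn < limit_pfn);

	/* Fixed scheme: compare the raw value, add 1 only where it cannot wrap. */
	retry_pfn = pfn_hi;
	if (retry_pfn < limit_pfn)		/* false here, so no bogus retry */
		printf("new: retry from %lu\n", retry_pfn + 1);
	else
		printf("new: no retry\n");
	return 0;
}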

diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -683,7 +683,7 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
 	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
 	if (ret)
-		return ret;
+		goto out_clk_unprepare;
 
 	ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
 	if (ret)
@@ -698,6 +698,8 @@ out_dev_unreg:
 	iommu_device_unregister(&data->iommu);
 out_sysfs_remove:
 	iommu_device_sysfs_remove(&data->iommu);
+out_clk_unprepare:
+	clk_disable_unprepare(data->bclk);
 	return ret;
 }
 
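
The Mediatek fix extends the existing unwind ladder with an out_clk_unprepare label so the bclk clock enabled earlier in probe is also released when iommu_device_sysfs_add() fails. The idiom in general, as a self-contained sketch with hypothetical foo_* steps rather than the driver's real calls:

/* Stubs so the sketch stands alone; the last step pretends to fail. */
static int foo_enable_clk(void) { return 0; }
static void foo_disable_clk(void) { }
static int foo_add_sysfs(void) { return 0; }
static void foo_remove_sysfs(void) { }
static int foo_register(void) { return -1; }

/* Each label releases exactly what was acquired before the failing step,
 * in reverse order of acquisition. */
static int foo_probe_sketch(void)
{
	int ret;

	ret = foo_enable_clk();			/* step 1 */
	if (ret)
		return ret;			/* nothing to unwind yet */

	ret = foo_add_sysfs();			/* step 2 */
	if (ret)
		goto out_disable_clk;

	ret = foo_register();			/* step 3 */
	if (ret)
		goto out_remove_sysfs;

	return 0;

out_remove_sysfs:
	foo_remove_sysfs();
out_disable_clk:
	foo_disable_clk();
	return ret;
}

int main(void)
{
	return foo_probe_sketch() ? 1 : 0;
}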