vfio: Remove calls to vfio_group_add_container_user()

When the open_device() op is called, container_users is incremented and
stays incremented until close_device(). Thus, so long as drivers call
functions within their open_device()/close_device() region, they do not
need to worry about container_users.

These functions can all only be called between open_device() and
close_device():

  vfio_pin_pages()
  vfio_unpin_pages()
  vfio_dma_rw()
  vfio_register_notifier()
  vfio_unregister_notifier()
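
For illustration only (not part of this patch), a minimal sketch of a driver
that keeps its pin/unpin calls inside that region; the my_mdev_* names, the
state layout and the origin of user_pfn are hypothetical:

  #include <linux/iommu.h>
  #include <linux/vfio.h>

  /* Hypothetical driver state embedding the vfio_device. */
  struct my_mdev_state {
          struct vfio_device vdev;
          unsigned long user_pfn;        /* assumed to be set up elsewhere */
          unsigned long phys_pfn;
  };

  static int my_mdev_open_device(struct vfio_device *vdev)
  {
          struct my_mdev_state *st =
                  container_of(vdev, struct my_mdev_state, vdev);
          int ret;

          /* Fine: open_device() runs with device->open_count elevated. */
          ret = vfio_pin_pages(vdev, &st->user_pfn, 1,
                               IOMMU_READ | IOMMU_WRITE, &st->phys_pfn);
          return ret < 0 ? ret : 0;
  }

  static void my_mdev_close_device(struct vfio_device *vdev)
  {
          struct my_mdev_state *st =
                  container_of(vdev, struct my_mdev_state, vdev);

          /* Also fine: open_count is only decremented after this op returns. */
          vfio_unpin_pages(vdev, &st->user_pfn, 1);
  }

  static const struct vfio_device_ops my_mdev_ops = {
          .name = "my-mdev",
          .open_device = my_mdev_open_device,
          .close_device = my_mdev_close_device,
  };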

Eliminate the calls to vfio_group_add_container_user() and add
vfio_assert_device_open() to detect driver misuse. Since code called from
within the close_device() op now checks device->open_count, leave
open_count elevated while calling the op and only decrement it afterwards.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/7-v4-8045e76bf00b+13d-vfio_mdev_no_group_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
commit eadd86f835 (parent 231657b345)
Author:    Jason Gunthorpe <jgg@nvidia.com>, 2022-05-11 13:13:00 -06:00
Committer: Alex Williamson <alex.williamson@redhat.com>
1 changed file with 17 additions and 63 deletions


@@ -1330,6 +1330,12 @@ static int vfio_group_add_container_user(struct vfio_group *group)
 
 static const struct file_operations vfio_device_fops;
 
+/* true if the vfio_device has open_device() called but not close_device() */
+static bool vfio_assert_device_open(struct vfio_device *device)
+{
+        return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
+}
+
 static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
 {
         struct vfio_device *device;
@@ -1544,8 +1550,10 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
         struct vfio_device *device = filep->private_data;
 
         mutex_lock(&device->dev_set->lock);
-        if (!--device->open_count && device->ops->close_device)
+        vfio_assert_device_open(device);
+        if (device->open_count == 1 && device->ops->close_device)
                 device->ops->close_device(device);
+        device->open_count--;
         mutex_unlock(&device->dev_set->lock);
 
         module_put(device->dev->driver->owner);
@@ -2112,7 +2120,8 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
         struct vfio_iommu_driver *driver;
         int ret;
 
-        if (!user_pfn || !phys_pfn || !npage)
+        if (!user_pfn || !phys_pfn || !npage ||
+            !vfio_assert_device_open(device))
                 return -EINVAL;
 
         if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
@@ -2121,10 +2130,6 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
         if (group->dev_counter > 1)
                 return -EINVAL;
 
-        ret = vfio_group_add_container_user(group);
-        if (ret)
-                return ret;
-
         container = group->container;
         driver = container->iommu_driver;
         if (likely(driver && driver->ops->pin_pages))
@@ -2134,8 +2139,6 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
         else
                 ret = -ENOTTY;
 
-        vfio_group_try_dissolve_container(group);
-
         return ret;
 }
 EXPORT_SYMBOL(vfio_pin_pages);
@@ -2156,16 +2159,12 @@ int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
         struct vfio_iommu_driver *driver;
         int ret;
 
-        if (!user_pfn || !npage)
+        if (!user_pfn || !npage || !vfio_assert_device_open(device))
                 return -EINVAL;
 
         if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
                 return -E2BIG;
 
-        ret = vfio_group_add_container_user(device->group);
-        if (ret)
-                return ret;
-
         container = device->group->container;
         driver = container->iommu_driver;
         if (likely(driver && driver->ops->unpin_pages))
@@ -2174,8 +2173,6 @@ int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
         else
                 ret = -ENOTTY;
 
-        vfio_group_try_dissolve_container(device->group);
-
         return ret;
 }
 EXPORT_SYMBOL(vfio_unpin_pages);
@@ -2204,13 +2201,9 @@ int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
         struct vfio_iommu_driver *driver;
         int ret = 0;
 
-        if (!data || len <= 0)
+        if (!data || len <= 0 || !vfio_assert_device_open(device))
                 return -EINVAL;
 
-        ret = vfio_group_add_container_user(device->group);
-        if (ret)
-                return ret;
-
         container = device->group->container;
         driver = container->iommu_driver;
 
@@ -2219,9 +2212,6 @@ int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
                                   user_iova, data, len, write);
         else
                 ret = -ENOTTY;
-
-        vfio_group_try_dissolve_container(device->group);
-
         return ret;
 }
 EXPORT_SYMBOL(vfio_dma_rw);
@@ -2234,10 +2224,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
         struct vfio_iommu_driver *driver;
         int ret;
 
-        ret = vfio_group_add_container_user(group);
-        if (ret)
-                return -EINVAL;
-
         container = group->container;
         driver = container->iommu_driver;
         if (likely(driver && driver->ops->register_notifier))
@@ -2245,9 +2231,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
                                                      events, nb);
         else
                 ret = -ENOTTY;
-
-        vfio_group_try_dissolve_container(group);
-
         return ret;
 }
 
@@ -2258,10 +2241,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
         struct vfio_iommu_driver *driver;
         int ret;
 
-        ret = vfio_group_add_container_user(group);
-        if (ret)
-                return -EINVAL;
-
         container = group->container;
         driver = container->iommu_driver;
         if (likely(driver && driver->ops->unregister_notifier))
@@ -2269,9 +2248,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
                                                        nb);
         else
                 ret = -ENOTTY;
-
-        vfio_group_try_dissolve_container(group);
-
         return ret;
 }
 
@@ -2300,10 +2276,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
         if (*events)
                 return -EINVAL;
 
-        ret = vfio_group_add_container_user(group);
-        if (ret)
-                return -EINVAL;
-
         ret = blocking_notifier_chain_register(&group->notifier, nb);
 
         /*
@@ -2313,25 +2285,6 @@ static int vfio_register_group_notifier(struct vfio_group *group,
         if (!ret && set_kvm && group->kvm)
                 blocking_notifier_call_chain(&group->notifier,
                                 VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
-
-        vfio_group_try_dissolve_container(group);
-
         return ret;
 }
 
-static int vfio_unregister_group_notifier(struct vfio_group *group,
-                                          struct notifier_block *nb)
-{
-        int ret;
-
-        ret = vfio_group_add_container_user(group);
-        if (ret)
-                return -EINVAL;
-
-        ret = blocking_notifier_chain_unregister(&group->notifier, nb);
-
-        vfio_group_try_dissolve_container(group);
-
-        return ret;
-}
-
@@ -2342,7 +2295,8 @@ int vfio_register_notifier(struct vfio_device *device,
         struct vfio_group *group = device->group;
         int ret;
 
-        if (!nb || !events || (*events == 0))
+        if (!nb || !events || (*events == 0) ||
+            !vfio_assert_device_open(device))
                 return -EINVAL;
 
         switch (type) {
@@ -2366,7 +2320,7 @@ int vfio_unregister_notifier(struct vfio_device *device,
         struct vfio_group *group = device->group;
         int ret;
 
-        if (!nb)
+        if (!nb || !vfio_assert_device_open(device))
                 return -EINVAL;
 
         switch (type) {
@@ -2374,7 +2328,7 @@ int vfio_unregister_notifier(struct vfio_device *device,
                 ret = vfio_unregister_iommu_notifier(group, nb);
                 break;
         case VFIO_GROUP_NOTIFY:
-                ret = vfio_unregister_group_notifier(group, nb);
+                ret = blocking_notifier_chain_unregister(&group->notifier, nb);
                 break;
         default:
                 ret = -EINVAL;