vfio/pci: Remove vfio_device_get_from_dev()
The last user of this function is in PCI callbacks that want to convert their struct pci_dev to a vfio_device. Instead of searching, use the vfio_device available trivially through the drvdata.

When a callback in the device_driver is called, the caller must hold the device_lock() on dev. The purpose of the device_lock is to prevent remove() from being called (see __device_release_driver), and to allow the driver to safely interact with its drvdata without races.

The PCI core correctly follows this and holds the device_lock() when calling error_detected (see report_error_detected) and sriov_configure (see sriov_numvfs_store).

Further, since the drvdata holds a positive refcount on the vfio_device, any access of the drvdata under the device_lock() from a driver callback needs no further protection or refcounting.

Thus the remark in the vfio_device_get_from_dev() comment does not apply here: VFIO PCI drivers all call vfio_unregister_group_dev() from their remove callbacks under the device_lock() and cannot race with the remaining callers.

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/2-v4-c841817a0349+8f-vfio_get_from_dev_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
This commit is contained in:
parent 91be0bd6c6
commit ff806cbd90

5 changed files with 12 additions and 70 deletions
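The commit message boils down to one pattern, which the diff below applies in every converted callback: the PCI core already holds device_lock() when it invokes the driver, so the callback can read its drvdata directly instead of taking an extra reference. The following is a minimal sketch of that pattern, not part of this commit, using a hypothetical my_err_detected() callback in place of vfio_pci_core_aer_err_detected():

	#include <linux/pci.h>
	#include <linux/vfio_pci_core.h>

	static pci_ers_result_t my_err_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
	{
		/*
		 * The caller (report_error_detected in the PCI core) holds
		 * device_lock(&pdev->dev), so remove() cannot run concurrently
		 * and the drvdata set at probe time is stable; no extra
		 * refcount (formerly vfio_device_get_from_dev()) is needed.
		 */
		struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);

		mutex_lock(&vdev->igate);
		/* ... report the error to userspace ... */
		mutex_unlock(&vdev->igate);

		return PCI_ERS_RESULT_CAN_RECOVER;
	}

The same reasoning covers sriov_configure: the drvdata holds a reference on the vfio_device for as long as the driver is bound, so no put is required on the way out.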
drivers/vfio/pci/vfio_pci.c
@@ -174,10 +174,12 @@ static void vfio_pci_remove(struct pci_dev *pdev)
 
 static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
 {
+	struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
+
 	if (!enable_sriov)
 		return -ENOENT;
 
-	return vfio_pci_core_sriov_configure(pdev, nr_virtfn);
+	return vfio_pci_core_sriov_configure(vdev, nr_virtfn);
 }
 
 static const struct pci_device_id vfio_pci_table[] = {
drivers/vfio/pci/vfio_pci_core.c
@@ -1894,9 +1894,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);
 
 void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
 {
-	struct pci_dev *pdev = vdev->pdev;
-
-	vfio_pci_core_sriov_configure(pdev, 0);
+	vfio_pci_core_sriov_configure(vdev, 0);
 
 	vfio_unregister_group_dev(&vdev->vdev);
@@ -1911,14 +1909,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);
 pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
 						pci_channel_state_t state)
 {
-	struct vfio_pci_core_device *vdev;
-	struct vfio_device *device;
-
-	device = vfio_device_get_from_dev(&pdev->dev);
-	if (device == NULL)
-		return PCI_ERS_RESULT_DISCONNECT;
-
-	vdev = container_of(device, struct vfio_pci_core_device, vdev);
+	struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
 
 	mutex_lock(&vdev->igate);
@@ -1927,26 +1918,18 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
 	mutex_unlock(&vdev->igate);
 
-	vfio_device_put(device);
-
 	return PCI_ERS_RESULT_CAN_RECOVER;
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
 
-int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
+int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
+				  int nr_virtfn)
 {
-	struct vfio_pci_core_device *vdev;
-	struct vfio_device *device;
+	struct pci_dev *pdev = vdev->pdev;
 	int ret = 0;
 
 	device_lock_assert(&pdev->dev);
 
-	device = vfio_device_get_from_dev(&pdev->dev);
-	if (!device)
-		return -ENODEV;
-
-	vdev = container_of(device, struct vfio_pci_core_device, vdev);
-
 	if (nr_virtfn) {
 		mutex_lock(&vfio_pci_sriov_pfs_mutex);
 		/*
@@ -1964,8 +1947,7 @@ int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
 		ret = pci_enable_sriov(pdev, nr_virtfn);
 		if (ret)
 			goto out_del;
-		ret = nr_virtfn;
-		goto out_put;
+		return nr_virtfn;
 	}
 
 	pci_disable_sriov(pdev);
@@ -1975,8 +1957,6 @@ int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
 	list_del_init(&vdev->sriov_pfs_item);
 out_unlock:
 	mutex_unlock(&vfio_pci_sriov_pfs_mutex);
-out_put:
-	vfio_device_put(device);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
drivers/vfio/vfio.c
@@ -473,31 +473,15 @@ static void vfio_group_get(struct vfio_group *group)
 	refcount_inc(&group->users);
 }
 
-static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
-{
-	struct iommu_group *iommu_group;
-	struct vfio_group *group;
-
-	iommu_group = iommu_group_get(dev);
-	if (!iommu_group)
-		return NULL;
-
-	group = vfio_group_get_from_iommu(iommu_group);
-	iommu_group_put(iommu_group);
-
-	return group;
-}
-
 /*
  * Device objects - create, release, get, put, search
  */
 /* Device reference always implies a group reference */
-void vfio_device_put(struct vfio_device *device)
+static void vfio_device_put(struct vfio_device *device)
 {
 	if (refcount_dec_and_test(&device->refcount))
 		complete(&device->comp);
 }
-EXPORT_SYMBOL_GPL(vfio_device_put);
 
 static bool vfio_device_try_get(struct vfio_device *device)
 {
@@ -831,29 +815,6 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device)
 }
 EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
 
-/*
- * Get a reference to the vfio_device for a device. Even if the
- * caller thinks they own the device, they could be racing with a
- * release call path, so we can't trust drvdata for the shortcut.
- * Go the long way around, from the iommu_group to the vfio_group
- * to the vfio_device.
- */
-struct vfio_device *vfio_device_get_from_dev(struct device *dev)
-{
-	struct vfio_group *group;
-	struct vfio_device *device;
-
-	group = vfio_group_get_from_dev(dev);
-	if (!group)
-		return NULL;
-
-	device = vfio_group_get_device(group, dev);
-	vfio_group_put(group);
-
-	return device;
-}
-EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
-
 static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
 						     char *buf)
 {
include/linux/vfio.h
@@ -125,8 +125,6 @@ void vfio_uninit_group_dev(struct vfio_device *device);
 int vfio_register_group_dev(struct vfio_device *device);
 int vfio_register_emulated_iommu_dev(struct vfio_device *device);
 void vfio_unregister_group_dev(struct vfio_device *device);
-extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
-extern void vfio_device_put(struct vfio_device *device);
 
 int vfio_assign_device_set(struct vfio_device *device, void *set_id);
include/linux/vfio_pci_core.h
@@ -227,8 +227,9 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
 int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
 void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev);
 void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
-int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn);
 extern const struct pci_error_handlers vfio_pci_core_err_handlers;
+int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
+				  int nr_virtfn);
 long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
 		unsigned long arg);
 int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,