Merge branches 'v5.10/vfio/bardirty', 'v5.10/vfio/dma_avail', 'v5.10/vfio/misc', 'v5.10/vfio/no-cmd-mem' and 'v5.10/vfio/yan_zhao_fixes' into v5.10/vfio/next

Alex Williamson 2020-09-22 10:56:51 -06:00
8 changed files with 61 additions and 17 deletions

diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c

@@ -197,9 +197,10 @@ void pcibios_bus_add_device(struct pci_dev *pdev)
 	 * With pdev->no_vf_scan the common PCI probing code does not
 	 * perform PF/VF linking.
 	 */
-	if (zdev->vfn)
+	if (zdev->vfn) {
 		zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
-
+		pdev->no_command_memory = 1;
+	}
 }
 
 static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c

@@ -180,6 +180,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
 	virtfn->device = iov->vf_device;
 	virtfn->is_virtfn = 1;
 	virtfn->physfn = pci_dev_get(dev);
+	virtfn->no_command_memory = 1;
 
 	if (id == 0)
 		pci_read_vf_config_common(virtfn);

diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c

@@ -1862,7 +1862,6 @@ static const struct vfio_device_ops vfio_pci_ops = {
 static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
 static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);
-static struct pci_driver vfio_pci_driver;
 
 static int vfio_pci_bus_notifier(struct notifier_block *nb,
 				 unsigned long action, void *data)

diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c

@@ -406,7 +406,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
 	 * PF SR-IOV capability, there's therefore no need to trigger
 	 * faults based on the virtual value.
 	 */
-	return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
+	return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY);
 }
 
 /*
@@ -523,8 +523,8 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
 
 	count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
 
-	/* Mask in virtual memory enable for SR-IOV devices */
-	if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
+	/* Mask in virtual memory enable */
+	if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
 		u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
 		u32 tmp_val = le32_to_cpu(*val);
@@ -592,9 +592,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
 	 * shows it disabled (phys_mem/io, then the device has
 	 * undergone some kind of backdoor reset and needs to be
 	 * restored before we allow it to enable the bars.
-	 * SR-IOV devices will trigger this, but we catch them later
+	 * SR-IOV devices will trigger this - for mem enable let's
+	 * catch this now and for io enable it will be caught later
 	 */
-	if ((new_mem && virt_mem && !phys_mem) ||
+	if ((new_mem && virt_mem && !phys_mem &&
+	     !pdev->no_command_memory) ||
 	    (new_io && virt_io && !phys_io) ||
 	    vfio_need_bar_restore(vdev))
 		vfio_bar_restore(vdev);
@@ -1737,12 +1739,14 @@ int vfio_config_init(struct vfio_pci_device *vdev)
 				 vconfig[PCI_INTERRUPT_PIN]);
 
 		vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
-
+	}
+	if (pdev->no_command_memory) {
 		/*
-		 * VFs do no implement the memory enable bit of the COMMAND
-		 * register therefore we'll not have it set in our initial
-		 * copy of config space after pci_enable_device(). For
-		 * consistency with PFs, set the virtual enable bit here.
+		 * VFs and devices that set pdev->no_command_memory do not
+		 * implement the memory enable bit of the COMMAND register
+		 * therefore we'll not have it set in our initial copy of
+		 * config space after pci_enable_device(). For consistency
+		 * with PFs, set the virtual enable bit here.
 		 */
 		*(__le16 *)&vconfig[PCI_COMMAND] |=
 					cpu_to_le16(PCI_COMMAND_MEMORY);
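
For context, the read path above virtualizes the memory-enable bit rather than trusting hardware: vfio_basic_config_read() clears PCI_COMMAND_MEMORY from the physical value and substitutes the bit tracked in vconfig. A minimal userspace-style sketch of that masking idea (the helper name is hypothetical; this is not the kernel code):

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND_MEMORY 0x2	/* bit 1 of COMMAND, per linux/pci_regs.h */

/*
 * Merge the virtual memory-enable bit into a physical COMMAND read.
 * phys_cmd: value read from the device; virt_cmd: value tracked in
 * the driver's virtual config space (vconfig in the diff above).
 */
static uint16_t mask_in_virt_mem(uint16_t phys_cmd, uint16_t virt_cmd)
{
	phys_cmd &= ~PCI_COMMAND_MEMORY;		/* hide the hardware bit */
	phys_cmd |= virt_cmd & PCI_COMMAND_MEMORY;	/* expose the virtual one */
	return phys_cmd;
}

int main(void)
{
	/* Device wires PCI_COMMAND_MEMORY to 0; the virtual copy has it set. */
	printf("guest sees 0x%04x\n", mask_in_virt_mem(0x0005, 0x0007));
	return 0;
}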

diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c

@@ -1949,8 +1949,10 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
 	if (!group)
 		return -ENODEV;
 
-	if (group->dev_counter > 1)
-		return -EINVAL;
+	if (group->dev_counter > 1) {
+		ret = -EINVAL;
+		goto err_pin_pages;
+	}
 
 	ret = vfio_group_add_container_user(group);
 	if (ret)
@@ -2051,6 +2053,9 @@ int vfio_group_pin_pages(struct vfio_group *group,
 	if (!group || !user_iova_pfn || !phys_pfn || !npage)
 		return -EINVAL;
 
+	if (group->dev_counter > 1)
+		return -EINVAL;
+
 	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
 		return -E2BIG;
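
The first hunk above fixes a reference leak: the group reference taken at the top of vfio_pin_pages() was not released on the early -EINVAL return, so the fix routes the error through the existing err_pin_pages label. A generic C sketch of the acquire/goto-cleanup pattern being restored (names are illustrative, not the vfio internals):

#include <errno.h>

struct resource { int refs; };

static void res_get(struct resource *r) { r->refs++; }
static void res_put(struct resource *r) { r->refs--; }

/* Every exit taken after res_get() must pass through the cleanup
 * label; a bare "return -EINVAL;" would leak the reference. */
static int do_work(struct resource *r, int busy)
{
	int ret = 0;

	res_get(r);
	if (busy) {
		ret = -EINVAL;
		goto out;
	}
	/* ... actual work on r ... */
out:
	res_put(r);
	return ret;
}

int main(void)
{
	struct resource r = { 0 };

	do_work(&r, 1);
	return r.refs;	/* 0 when no reference leaked */
}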

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c

@@ -2609,6 +2609,20 @@ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
 	return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
 }
 
+static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu,
+					   struct vfio_info_cap *caps)
+{
+	struct vfio_iommu_type1_info_dma_avail cap_dma_avail;
+
+	cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL;
+	cap_dma_avail.header.version = 1;
+
+	cap_dma_avail.avail = iommu->dma_avail;
+
+	return vfio_info_add_capability(caps, &cap_dma_avail.header,
+					sizeof(cap_dma_avail));
+}
+
 static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
 				     unsigned long arg)
 {
@@ -2641,6 +2655,9 @@ static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
 
 	ret = vfio_iommu_migration_build_caps(iommu, &caps);
 
+	if (!ret)
+		ret = vfio_iommu_dma_avail_build_caps(iommu, &caps);
+
 	if (!ret)
 		ret = vfio_iommu_iova_build_caps(iommu, &caps);
@@ -2933,7 +2950,8 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
 			 * size
 			 */
 			bitmap_set(dma->bitmap, offset >> pgshift,
-				   *copied >> pgshift);
+				   ((offset + *copied - 1) >> pgshift) -
+				   (offset >> pgshift) + 1);
 		}
 	} else
 		*copied = copy_from_user(data, (void __user *)vaddr,
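
The last hunk above fixes an undercount: a write of *copied bytes starting at offset dirties every page the byte range [offset, offset + *copied) touches, which is one more page than *copied >> pgshift suggests whenever the range is not page-aligned. A standalone check of the two formulas, with illustrative values only:

#include <stdio.h>

int main(void)
{
	unsigned long pgshift = 12;	/* 4 KiB pages */
	unsigned long offset = 0x1800;	/* mid-page start */
	unsigned long copied = 0x1000;	/* one page worth of data */

	unsigned long old_bits = copied >> pgshift;
	unsigned long new_bits = ((offset + copied - 1) >> pgshift) -
				 (offset >> pgshift) + 1;

	/* [0x1800, 0x2800) touches pages 1 and 2, so 2 bits must be set. */
	printf("old: %lu bit(s), new: %lu bit(s)\n", old_bits, new_bits);
	return 0;
}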

diff --git a/include/linux/pci.h b/include/linux/pci.h

@@ -445,6 +445,7 @@ struct pci_dev {
 	unsigned int	is_probed:1;		/* Device probing in progress */
 	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
 	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
+	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
 	pci_dev_flags_t dev_flags;
 	atomic_t	enable_cnt;	/* pci_enable_device has been called */

diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h

@@ -462,7 +462,7 @@ struct vfio_region_gfx_edid {
  *  5. Resumed
  *          |--------->|
  *
- * 0. Default state of VFIO device is _RUNNNG when the user application starts.
+ * 0. Default state of VFIO device is _RUNNING when the user application starts.
  * 1. During normal shutdown of the user application, the user application may
  *    optionally change the VFIO device state from _RUNNING to _STOP. This
  *    transition is optional. The vendor driver must support this transition but
@@ -1039,6 +1039,21 @@ struct vfio_iommu_type1_info_cap_migration {
 	__u64	max_dirty_bitmap_size;		/* in bytes */
 };
 
+/*
+ * The DMA available capability allows to report the current number of
+ * simultaneously outstanding DMA mappings that are allowed.
+ *
+ * The structure below defines version 1 of this capability.
+ *
+ * avail: specifies the current number of outstanding DMA mappings allowed.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
+
+struct vfio_iommu_type1_info_dma_avail {
+	struct vfio_info_cap_header header;
+	__u32	avail;
+};
+
 #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
 
 /**
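
From userspace, the new capability is discovered by walking the VFIO_IOMMU_GET_INFO capability chain. A minimal sketch, assuming container is an fd for /dev/vfio/vfio with a group already attached and the type1 IOMMU enabled (error handling trimmed; not a complete VFIO setup):

#include <fcntl.h>
#include <linux/vfio.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

static void print_dma_avail(int container)
{
	struct vfio_iommu_type1_info probe = { .argsz = sizeof(probe) };
	struct vfio_iommu_type1_info *info;
	__u32 off;

	/* First call only reports the argsz needed for the cap chain. */
	if (ioctl(container, VFIO_IOMMU_GET_INFO, &probe))
		return;

	info = calloc(1, probe.argsz);
	if (!info)
		return;
	info->argsz = probe.argsz;

	if (ioctl(container, VFIO_IOMMU_GET_INFO, info) == 0 &&
	    (info->flags & VFIO_IOMMU_INFO_CAPS)) {
		/* Chain offsets are relative to the start of info. */
		for (off = info->cap_offset; off; ) {
			struct vfio_info_cap_header *hdr =
				(struct vfio_info_cap_header *)((char *)info + off);

			if (hdr->id == VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL) {
				struct vfio_iommu_type1_info_dma_avail *cap =
					(struct vfio_iommu_type1_info_dma_avail *)hdr;

				printf("outstanding DMA mappings allowed: %u\n",
				       cap->avail);
				break;
			}
			off = hdr->next;
		}
	}
	free(info);
}

int main(void)
{
	/* A real program would attach a group and set VFIO_TYPE1_IOMMU
	 * on the container before querying; see the VFIO documentation. */
	int container = open("/dev/vfio/vfio", O_RDWR);

	if (container < 0)
		return 1;
	print_dma_avail(container);
	close(container);
	return 0;
}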