Merge branch 'pci/host-vmd'

  - Save VMD's pci_dev in x86 struct pci_sysdata (Jon Derrick)

  - Add pci_real_dma_dev() for DMA aliases not on the same bus as requester
    (Jon Derrick)

  - Add IOMMU mappings for pci_real_dma_dev() (Jon Derrick)

  - Remove IOMMU sanity checks for VMD devices (Jon Derrick)

  - Remove VMD dma_map_ops overrides (Jon Derrick)

  - Remove unused X86_DEV_DMA_OPS (Christoph Hellwig)

  - Add VMD device IDs that need bus restriction mode (Sushma Kalakota)

* pci/host-vmd:
  PCI: vmd: Add two VMD Device IDs
  x86/PCI: Remove X86_DEV_DMA_OPS
  PCI: vmd: Remove dma_map_ops overrides
  iommu/vt-d: Remove VMD child device sanity check
  iommu/vt-d: Use pci_real_dma_dev() for mapping
  PCI: Introduce pci_real_dma_dev()
  x86/PCI: Expose VMD's pci_dev in struct pci_sysdata
  x86/PCI: Add to_pci_sysdata() helper
commit db83c269d2
Bjorn Helgaas, 2020-01-29 17:00:02 -06:00
10 changed files with 58 additions and 228 deletions
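
The idea underlying the whole series: a device behind a VMD bridge issues DMA that reaches the IOMMU with the VMD endpoint's requester ID, so mappings must be made for the VMD device rather than for the child. Below is a minimal user-space model of the pci_real_dma_dev() redirection these patches introduce; the struct layout and requester IDs are invented for illustration, and the real __weak default and x86 override appear in the drivers/pci/pci.c and arch/x86/pci/common.c hunks further down.

#include <stdio.h>

struct pci_dev { unsigned int requester_id; struct pci_dev *vmd; };

/* Modeled on the x86 override below: a device behind a VMD bridge
 * aliases its DMA to the VMD endpoint; any other device is its own
 * DMA device, matching the generic __weak fallback. */
static struct pci_dev *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev->vmd ? dev->vmd : dev;
}

int main(void)
{
	struct pci_dev vmd  = { 0x5d05, NULL };	/* the VMD endpoint itself */
	struct pci_dev nvme = { 0x0100, &vmd };	/* NVMe child behind VMD   */

	/* IOMMU-style callers map for the real DMA device: prints 5d05 */
	printf("%04x\n", pci_real_dma_dev(&nvme)->requester_id);
	return 0;
}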

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig

@@ -2955,9 +2955,6 @@ config HAVE_ATOMIC_IOMAP
 	def_bool y
 	depends on X86_32
 
-config X86_DEV_DMA_OPS
-	bool
-
 source "drivers/firmware/Kconfig"
 
 source "arch/x86/kvm/Kconfig"

diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h

@@ -8,16 +8,6 @@ struct dev_archdata {
 #endif
 };
 
-#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
-struct dma_domain {
-	struct list_head node;
-	const struct dma_map_ops *dma_ops;
-	int domain_nr;
-};
-void add_dma_domain(struct dma_domain *domain);
-void del_dma_domain(struct dma_domain *domain);
-#endif
-
 struct pdev_archdata {
 };
 

diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h

@@ -25,7 +25,7 @@ struct pci_sysdata {
 	void		*fwnode;	/* IRQ domain for MSI assignment */
 #endif
 #if IS_ENABLED(CONFIG_VMD)
-	bool vmd_domain;		/* True if in Intel VMD domain */
+	struct pci_dev	*vmd_dev;	/* VMD Device if in Intel VMD domain */
 #endif
 };
 
@@ -35,12 +35,15 @@ extern int noioapicreroute;
 
 #ifdef CONFIG_PCI
 
+static inline struct pci_sysdata *to_pci_sysdata(const struct pci_bus *bus)
+{
+	return bus->sysdata;
+}
+
 #ifdef CONFIG_PCI_DOMAINS
 static inline int pci_domain_nr(struct pci_bus *bus)
 {
-	struct pci_sysdata *sd = bus->sysdata;
-
-	return sd->domain;
+	return to_pci_sysdata(bus)->domain;
 }
 
 static inline int pci_proc_domain(struct pci_bus *bus)
@@ -52,24 +55,20 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
 static inline void *_pci_root_bus_fwnode(struct pci_bus *bus)
 {
-	struct pci_sysdata *sd = bus->sysdata;
-
-	return sd->fwnode;
+	return to_pci_sysdata(bus)->fwnode;
 }
 
 #define pci_root_bus_fwnode	_pci_root_bus_fwnode
 #endif
 
+#if IS_ENABLED(CONFIG_VMD)
 static inline bool is_vmd(struct pci_bus *bus)
 {
-#if IS_ENABLED(CONFIG_VMD)
-	struct pci_sysdata *sd = bus->sysdata;
-
-	return sd->vmd_domain;
-#else
-	return false;
-#endif
+	return to_pci_sysdata(bus)->vmd_dev != NULL;
 }
+#else
+#define is_vmd(bus)		false
+#endif /* CONFIG_VMD */
 
 /* Can be used to override the logic in pci_scan_bus for skipping
    already-configured bus numbers - to be used for buggy BIOSes
@@ -124,9 +123,7 @@ void native_restore_msi_irqs(struct pci_dev *dev);
 /* Returns the node based on pci bus */
 static inline int __pcibus_to_node(const struct pci_bus *bus)
 {
-	const struct pci_sysdata *sd = bus->sysdata;
-
-	return sd->node;
+	return to_pci_sysdata(bus)->node;
 }
 
 static inline const struct cpumask *
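
The is_vmd() rework above also shows the usual kernel pattern for compiling a feature out: when CONFIG_VMD is disabled, is_vmd() becomes a constant-false macro, so callers keep unguarded checks and the compiler discards the dead branches. A small stand-alone model of that pattern (the config toggle and bus check here are invented for illustration):

#include <stdio.h>

#define CONFIG_VMD 0	/* toggle to model building with/without VMD */

#if CONFIG_VMD
static int is_vmd(int bus) { return bus == 0x5d; }	/* stand-in check */
#else
#define is_vmd(bus) 0	/* constant false: callers compile unchanged */
#endif

int main(void)
{
	if (is_vmd(0x02))
		printf("VMD domain\n");
	else
		printf("ordinary domain\n");	/* branch chosen at compile time */
	return 0;
}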

diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c

@@ -625,43 +625,6 @@ unsigned int pcibios_assign_all_busses(void)
 	return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
 }
 
-#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
-static LIST_HEAD(dma_domain_list);
-static DEFINE_SPINLOCK(dma_domain_list_lock);
-
-void add_dma_domain(struct dma_domain *domain)
-{
-	spin_lock(&dma_domain_list_lock);
-	list_add(&domain->node, &dma_domain_list);
-	spin_unlock(&dma_domain_list_lock);
-}
-EXPORT_SYMBOL_GPL(add_dma_domain);
-
-void del_dma_domain(struct dma_domain *domain)
-{
-	spin_lock(&dma_domain_list_lock);
-	list_del(&domain->node);
-	spin_unlock(&dma_domain_list_lock);
-}
-EXPORT_SYMBOL_GPL(del_dma_domain);
-
-static void set_dma_domain_ops(struct pci_dev *pdev)
-{
-	struct dma_domain *domain;
-
-	spin_lock(&dma_domain_list_lock);
-	list_for_each_entry(domain, &dma_domain_list, node) {
-		if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
-			pdev->dev.dma_ops = domain->dma_ops;
-			break;
-		}
-	}
-	spin_unlock(&dma_domain_list_lock);
-}
-#else
-static void set_dma_domain_ops(struct pci_dev *pdev) {}
-#endif
-
 static void set_dev_domain_options(struct pci_dev *pdev)
 {
 	if (is_vmd(pdev->bus))
@@ -697,7 +660,6 @@ int pcibios_add_device(struct pci_dev *dev)
 			pa_data = data->next;
 			memunmap(data);
 		}
-	set_dma_domain_ops(dev);
 	set_dev_domain_options(dev);
 	return 0;
 }
@@ -736,3 +698,13 @@ int pci_ext_cfg_avail(void)
 	else
 		return 0;
 }
+
+#if IS_ENABLED(CONFIG_VMD)
+struct pci_dev *pci_real_dma_dev(struct pci_dev *dev)
+{
+	if (is_vmd(dev->bus))
+		return to_pci_sysdata(dev->bus)->vmd_dev;
+
+	return dev;
+}
+#endif

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c

@@ -774,13 +774,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pf_pdev;
 
-		pdev = to_pci_dev(dev);
-
-#ifdef CONFIG_X86
-		/* VMD child devices currently cannot be handled individually */
-		if (is_vmd(pdev->bus))
-			return NULL;
-#endif
+		pdev = pci_real_dma_dev(to_pci_dev(dev));
 
 		/* VFs aren't listed in scope tables; we need to look up
 		 * the PF instead to find the IOMMU. */
@@ -2428,6 +2422,9 @@ static struct dmar_domain *find_domain(struct device *dev)
 		     dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
 		return NULL;
 
+	if (dev_is_pci(dev))
+		dev = &pci_real_dma_dev(to_pci_dev(dev))->dev;
+
 	/* No lock here, assumes no domain exit in normal case */
 	info = dev->archdata.iommu;
 	if (likely(info))

diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig

@@ -239,7 +239,6 @@ config PCIE_TANGO_SMP8759
 
 config VMD
 	depends on PCI_MSI && X86_64 && SRCU
-	select X86_DEV_DMA_OPS
 	tristate "Intel Volume Management Device Driver"
 	---help---
 	  Adds support for the Intel Volume Management Device (VMD). VMD is a

diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c

@@ -98,9 +98,6 @@ struct vmd_dev {
 	struct irq_domain	*irq_domain;
 	struct pci_bus		*bus;
 	u8			busn_start;
-
-	struct dma_map_ops	dma_ops;
-	struct dma_domain	dma_domain;
 };
 
 static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -295,151 +292,6 @@ static struct msi_domain_info vmd_msi_domain_info = {
 	.chip		= &vmd_msi_controller,
 };
 
-/*
- * VMD replaces the requester ID with its own.  DMA mappings for devices in a
- * VMD domain need to be mapped for the VMD, not the device requiring
- * the mapping.
- */
-static struct device *to_vmd_dev(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
-
-	return &vmd->dev->dev;
-}
-
-static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
-		       gfp_t flag, unsigned long attrs)
-{
-	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
-}
-
-static void vmd_free(struct device *dev, size_t size, void *vaddr,
-		     dma_addr_t addr, unsigned long attrs)
-{
-	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
-}
-
-static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
-		    void *cpu_addr, dma_addr_t addr, size_t size,
-		    unsigned long attrs)
-{
-	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
-			      attrs);
-}
-
-static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
-			   void *cpu_addr, dma_addr_t addr, size_t size,
-			   unsigned long attrs)
-{
-	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
-				     attrs);
-}
-
-static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction dir,
-			       unsigned long attrs)
-{
-	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
-				  attrs);
-}
-
-static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
-			   enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
-}
-
-static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction dir, unsigned long attrs)
-{
-	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-			 enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-				    size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
-}
-
-static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
-				       size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
-}
-
-static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
-{
-	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction dir)
-{
-	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static int vmd_dma_supported(struct device *dev, u64 mask)
-{
-	return dma_supported(to_vmd_dev(dev), mask);
-}
-
-static u64 vmd_get_required_mask(struct device *dev)
-{
-	return dma_get_required_mask(to_vmd_dev(dev));
-}
-
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
-{
-	struct dma_domain *domain = &vmd->dma_domain;
-
-	if (get_dma_ops(&vmd->dev->dev))
-		del_dma_domain(domain);
-}
-
-#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
-	do {					\
-		if (source->fn)			\
-			dest->fn = vmd_##fn;	\
-	} while (0)
-
-static void vmd_setup_dma_ops(struct vmd_dev *vmd)
-{
-	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
-	struct dma_map_ops *dest = &vmd->dma_ops;
-	struct dma_domain *domain = &vmd->dma_domain;
-
-	domain->domain_nr = vmd->sysdata.domain;
-	domain->dma_ops = dest;
-
-	if (!source)
-		return;
-
-	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
-	ASSIGN_VMD_DMA_OPS(source, dest, free);
-	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
-	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
-	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
-	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
-	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
-	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
-	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
-	add_dma_domain(domain);
-}
-#undef ASSIGN_VMD_DMA_OPS
-
 static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
 				  unsigned int devfn, int reg, int len)
 {
@@ -679,7 +531,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 		.parent = res,
 	};
 
-	sd->vmd_domain = true;
+	sd->vmd_dev = vmd->dev;
 	sd->domain = vmd_find_free_domain();
 	if (sd->domain < 0)
 		return sd->domain;
@@ -709,7 +561,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	}
 
 	vmd_attach_resources(vmd);
-	vmd_setup_dma_ops(vmd);
 	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
 	pci_scan_child_bus(vmd->bus);
 
@@ -824,7 +675,6 @@ static void vmd_remove(struct pci_dev *dev)
 	pci_stop_root_bus(vmd->bus);
 	pci_remove_root_bus(vmd->bus);
 	vmd_cleanup_srcu(vmd);
-	vmd_teardown_dma_ops(vmd);
 	vmd_detach_resources(vmd);
 	irq_domain_remove(vmd->irq_domain);
 }
@@ -868,6 +718,10 @@ static const struct pci_device_id vmd_ids[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
 		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
 				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
+		.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
+		.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
 		.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
 	{0,}
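
The two new entries ride the standard pci_device_id mechanism: match by vendor/device ID, then let the probe path read feature bits out of driver_data (here VMD_FEAT_HAS_BUS_RESTRICTIONS). A user-space model of that lookup, with the flag value and helper invented for illustration:

#include <stdio.h>

#define VMD_FEAT_HAS_BUS_RESTRICTIONS	(1 << 1)	/* illustrative value */

struct id_entry { unsigned short device; unsigned long driver_data; };

/* Modeled on vmd_ids[] above: the matched entry's driver_data tells the
 * probe path whether the part must run in bus-restricted mode. */
static const struct id_entry ids[] = {
	{ 0x467f, VMD_FEAT_HAS_BUS_RESTRICTIONS },
	{ 0x4c3d, VMD_FEAT_HAS_BUS_RESTRICTIONS },
	{ 0, 0 },
};

static unsigned long features(unsigned short device)
{
	for (const struct id_entry *e = ids; e->device; e++)
		if (e->device == device)
			return e->driver_data;
	return 0;
}

int main(void)
{
	printf("0x4c3d bus-restricted: %s\n",
	       features(0x4c3d) & VMD_FEAT_HAS_BUS_RESTRICTIONS ? "yes" : "no");
	return 0;
}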

diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c

@@ -6048,7 +6048,9 @@ bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
 	return (dev1->dma_alias_mask &&
 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
 	       (dev2->dma_alias_mask &&
-		test_bit(dev1->devfn, dev2->dma_alias_mask));
+		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
+	       pci_real_dma_dev(dev1) == dev2 ||
+	       pci_real_dma_dev(dev2) == dev1;
 }
 
 bool pci_device_is_present(struct pci_dev *pdev)
@@ -6072,6 +6074,21 @@ void pci_ignore_hotplug(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
 
+/**
+ * pci_real_dma_dev - Get PCI DMA device for PCI device
+ * @dev: the PCI device that may have a PCI DMA alias
+ *
+ * Permits the platform to provide architecture-specific functionality to
+ * devices needing to alias DMA to another PCI device on another PCI bus. If
+ * the PCI device is on the same bus, it is recommended to use
+ * pci_add_dma_alias(). This is the default implementation. Architecture
+ * implementations can override this.
+ */
+struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
+{
+	return dev;
+}
+
 resource_size_t __weak pcibios_default_alignment(void)
 {
 	return 0;
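
Beyond adding the __weak default, the hunk extends pci_devs_are_dma_aliases(): two devices now also count as DMA aliases when one resolves to the other via pci_real_dma_dev(). A minimal model of that equality check (structures simplified; the per-bus dma_alias_mask test is omitted):

#include <stdio.h>

struct pci_dev { int devfn; struct pci_dev *real; };

static struct pci_dev *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev->real ? dev->real : dev;	/* the __weak default returns dev */
}

/* Modeled on the pci_devs_are_dma_aliases() change above. */
static int dma_aliases(struct pci_dev *a, struct pci_dev *b)
{
	return pci_real_dma_dev(a) == b || pci_real_dma_dev(b) == a;
}

int main(void)
{
	struct pci_dev vmd   = { 0x28, NULL };
	struct pci_dev child = { 0x00, &vmd };

	printf("%d\n", dma_aliases(&child, &vmd));	/* prints 1 */
	return 0;
}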

diff --git a/drivers/pci/search.c b/drivers/pci/search.c

@@ -32,6 +32,12 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
 	struct pci_bus *bus;
 	int ret;
 
+	/*
+	 * The device may have an explicit alias requester ID for DMA where the
+	 * requester is on another PCI bus.
+	 */
+	pdev = pci_real_dma_dev(pdev);
+
 	ret = fn(pdev, pci_dev_id(pdev), data);
 	if (ret)
 		return ret;
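
With the hunk above, pci_for_each_dma_alias() resolves the cross-bus alias before reporting any requester IDs, so for a VMD child the callback sees the VMD endpoint's ID first. A greatly simplified model of the walk (the real function also handles dma_alias_mask entries and PCIe-to-PCI bridge aliases):

#include <stdio.h>

struct dev { int rid; struct dev *real, *parent; };

static int for_each_dma_alias(struct dev *d, int (*fn)(struct dev *, int))
{
	int ret;

	d = d->real ? d->real : d;		/* the pci_real_dma_dev() step */
	if ((ret = fn(d, d->rid)))
		return ret;
	for (d = d->parent; d; d = d->parent)	/* then walk toward the root */
		if ((ret = fn(d, d->rid)))
			return ret;
	return 0;
}

static int show(struct dev *d, int rid)
{
	printf("RID %02x\n", rid);
	return 0;
}

int main(void)
{
	struct dev root = { 0x00, NULL, NULL };
	struct dev vmd  = { 0x5d, NULL, &root };
	struct dev nvme = { 0x10, &vmd, NULL };

	return for_each_dma_alias(&nvme, show);	/* RID 5d, then RID 00 */
}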

diff --git a/include/linux/pci.h b/include/linux/pci.h

@@ -1202,6 +1202,7 @@ int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
 bool pci_device_is_present(struct pci_dev *pdev);
 void pci_ignore_hotplug(struct pci_dev *dev);
+struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
 		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
 		const char *fmt, ...);