IOMMU Fixes for Linux v5.8-rc2:

A couple of Intel VT-d fixes:
 
 	- Make Intel SVM code 64-bit only. The code uses pgd_t* and the
 	  IOMMU only supports long-mode page-table formats, so it's
 	  broken on 32-bit anyway.
 
 	- Make sure GFX quirks for Intel VT-d are not applied to
 	  untrusted devices. Those devices might gain full memory access
 	  otherwise.
 
 	- Identity mapping setup fix.
 
 	- Fix ACS enabling when Intel IOMMU is off and untrusted devices
 	  are detected.
 
 	- Two smaller fixes for coherency and IO page-table setup
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAl72CbcACgkQK/BELZcB
 GuM0yQ//SrGrD/al8hBssSeAsE66o7YJvG6XaBmQ4LFlap7kJpevHimScJfVjo4c
 eOpFeJ5BL11CtX8FWcvmmmWl0o+qEF/x7HxnR2VC1OHp1J2ATGMv6PmJ0jFTYiRy
 pNuqHoj4ghtwkSdX/leYc3aQM3PIS21G0ZfHM5uSnkQYtWiMG9Riu2OB1UsXHGFG
 Cjowur86lpG70Se/4GctpiahJj7L8ZVe5en12mwFGp39evckcHl562DG2JVJNhtc
 xUaiQzUwwGpe4ajQLoaeOKcZO4vrJwq/YVyNMhK27+8COFULjnUd9YwqLErdZz2C
 t0ODulgTViq3wEUiPeqKiGbAyXvfqgISInt4zRnBQQAtdCFCqwzSbbqjUz6BKa8a
 iwh7hAJiDRp6Zp7I70BhS1iEXTLqeXHrSJTG7C+3tNzhWQHqJmt+N3xC77eUTn5z
 6UCDcDjRSMC+O14QNzRyPi7XMgy4oTcSPExnUGKI46jfvXlfuh6DCCyiePrSvqp3
 3JvC+9iUQ42gdkzIfpt+dFFMZAQyGErGsu+FWkJ90nXlqyzrT1sjbVxIFlUp5zfy
 NrioH7MXhSoKPjLCsxPgoiPU+b/dIKiMZ/ErAi+CsV93vMDF3soORk3Qo1NuLxcA
 emFNrH3FkAXzHFfKieNyqhfib5xD1Xbk7aQKbtHZmm32m3ZVBrQ=
 =RRP5
 -----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-v5.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:
 "A couple of Intel VT-d fixes:

   - Make Intel SVM code 64-bit only. The code uses pgd_t* and the IOMMU
     only supports long-mode page-table formats, so it's broken on 32-bit
     anyway.

   - Make sure GFX quirks for Intel VT-d are not applied to untrusted
     devices. Those devices might gain full memory access otherwise.

   - Identity mapping setup fix.

   - Fix ACS enabling when Intel IOMMU is off and untrusted devices are
     detected.

   - Two smaller fixes for coherency and IO page-table setup"

* tag 'iommu-fixes-v5.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Fix misuse of iommu_domain_identity_map()
  iommu/vt-d: Update scalable mode paging structure coherency
  iommu/vt-d: Enable PCI ACS for platform opt in hint
  iommu/vt-d: Don't apply gfx quirks to untrusted devices
  iommu/vt-d: Set U/S bit in first level page table by default
  iommu/vt-d: Make Intel SVM code 64-bit only
commit bd37cdf8ba
Linus Torvalds, 2020-06-26 12:30:07 -07:00
4 changed files with 56 additions and 9 deletions

@@ -211,7 +211,7 @@ config INTEL_IOMMU_DEBUGFS
 
 config INTEL_IOMMU_SVM
 	bool "Support for Shared Virtual Memory with Intel IOMMU"
-	depends on INTEL_IOMMU && X86
+	depends on INTEL_IOMMU && X86_64
 	select PCI_PASID
 	select PCI_PRI
 	select MMU_NOTIFIER

@@ -898,7 +898,8 @@ int __init detect_intel_iommu(void)
 	if (!ret)
 		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
 					   &validate_drhd_cb);
-	if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+	if (!ret && !no_iommu && !iommu_detected &&
+	    (!dmar_disabled || dmar_platform_optin())) {
 		iommu_detected = 1;
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
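
For readability, here is a small userspace restatement (not kernel code) of the gating logic in the hunk above; should_request_acs() and its boolean parameters are illustrative stand-ins for the kernel's ret, no_iommu, iommu_detected, dmar_disabled and dmar_platform_optin():

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the condition guarding pci_request_acs() after the fix. */
static bool should_request_acs(bool parse_ok, bool no_iommu,
			       bool iommu_detected, bool dmar_disabled,
			       bool platform_optin)
{
	return parse_ok && !no_iommu && !iommu_detected &&
	       (!dmar_disabled || platform_optin);
}

int main(void)
{
	/* intel_iommu=off, but the platform opted in to DMA protection:
	 * ACS is still requested after this fix. */
	assert(should_request_acs(true, false, false, true, true));

	/* No platform opt-in and the IOMMU disabled: still skipped, as before. */
	assert(!should_request_acs(true, false, false, true, false));

	puts("ACS gating cases check out");
	return 0;
}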

@@ -612,6 +612,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 	return g_iommus[iommu_id];
 }
 
+static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+{
+	return sm_supported(iommu) ?
+			ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
+}
+
 static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
 	struct dmar_drhd_unit *drhd;
@@ -623,7 +629,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 
 	for_each_domain_iommu(i, domain) {
 		found = true;
-		if (!ecap_coherent(g_iommus[i]->ecap)) {
+		if (!iommu_paging_structure_coherency(g_iommus[i])) {
 			domain->iommu_coherency = 0;
 			break;
 		}
@@ -634,7 +640,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 		/* No hardware attached; use lowest common denominator */
 		rcu_read_lock();
 		for_each_active_iommu(iommu, drhd) {
-			if (!ecap_coherent(iommu->ecap)) {
+			if (!iommu_paging_structure_coherency(iommu)) {
 				domain->iommu_coherency = 0;
 				break;
 			}
@@ -921,7 +927,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
 			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
 			if (domain_use_first_level(domain))
-				pteval |= DMA_FL_PTE_XD;
+				pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
 			if (cmpxchg64(&pte->val, 0ULL, pteval))
 				/* Someone else set it while we were thinking; use theirs. */
 				free_pgtable_page(tmp_page);
@@ -1951,7 +1957,6 @@ static inline void
 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
 {
 	context->hi |= pasid & ((1 << 20) - 1);
-	context->hi |= (1 << 20);
 }
 
 /*
@@ -2095,7 +2100,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
 	context_set_fault_enable(context);
 	context_set_present(context);
-	domain_flush_cache(domain, context, sizeof(*context));
+	if (!ecap_coherent(iommu->ecap))
+		clflush_cache_range(context, sizeof(*context));
 
 	/*
 	 * It's a non-present to present mapping. If hardware doesn't cache
@@ -2243,7 +2249,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
 	attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
 	if (domain_use_first_level(domain))
-		attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
+		attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
 
 	if (!sg) {
 		sg_res = nr_pages;
@@ -2695,7 +2701,9 @@ static int __init si_domain_init(int hw)
 				    end >> agaw_to_width(si_domain->agaw)))
 				continue;
 
-			ret = iommu_domain_identity_map(si_domain, start, end);
+			ret = iommu_domain_identity_map(si_domain,
+					mm_to_dma_pfn(start >> PAGE_SHIFT),
+					mm_to_dma_pfn(end >> PAGE_SHIFT));
 			if (ret)
 				return ret;
 		}
@@ -6021,6 +6029,23 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
 	return ret;
 }
 
+/*
+ * Check that the device does not live on an external facing PCI port that is
+ * marked as untrusted. Such devices should not be able to apply quirks and
+ * thus not be able to bypass the IOMMU restrictions.
+ */
+static bool risky_device(struct pci_dev *pdev)
+{
+	if (pdev->untrusted) {
+		pci_info(pdev,
+			 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
+			 pdev->vendor, pdev->device);
+		pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
+		return true;
+	}
+	return false;
+}
+
 const struct iommu_ops intel_iommu_ops = {
 	.capable		= intel_iommu_capable,
 	.domain_alloc		= intel_iommu_domain_alloc,
@@ -6060,6 +6085,9 @@ const struct iommu_ops intel_iommu_ops = {
 
 static void quirk_iommu_igfx(struct pci_dev *dev)
 {
+	if (risky_device(dev))
+		return;
+
 	pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
 	dmar_map_gfx = 0;
 }
@@ -6101,6 +6129,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
 
 static void quirk_iommu_rwbf(struct pci_dev *dev)
 {
+	if (risky_device(dev))
+		return;
+
 	/*
 	 * Mobile 4 Series Chipset neglects to set RWBF capability,
 	 * but needs it. Same seems to hold for the desktop versions.
@@ -6131,6 +6162,9 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 {
 	unsigned short ggc;
 
+	if (risky_device(dev))
+		return;
+
 	if (pci_read_config_word(dev, GGC, &ggc))
 		return;
 
@@ -6164,6 +6198,12 @@ static void __init check_tylersburg_isoch(void)
 	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
 	if (!pdev)
 		return;
+
+	if (risky_device(pdev)) {
+		pci_dev_put(pdev);
+		return;
+	}
+
 	pci_dev_put(pdev);
 
 	/* System Management Registers. Might be hidden, in which case
@@ -6173,6 +6213,11 @@ static void __init check_tylersburg_isoch(void)
 	if (!pdev)
 		return;
 
+	if (risky_device(pdev)) {
+		pci_dev_put(pdev);
+		return;
+	}
+
 	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
 		pci_dev_put(pdev);
 		return;
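
All the quirk hunks above follow one pattern: call risky_device() first and bail out before relaxing any IOMMU restriction. As a usage sketch, this is how a hypothetical new VT-d quirk would adopt the same gating — quirk_example_vtd() and the 0x1234 device ID are made up for illustration; risky_device() and DECLARE_PCI_FIXUP_HEADER() are the real pieces, and the sketch assumes it sits in the same file as risky_device():

#include <linux/pci.h>

/* Hypothetical quirk; 0x1234 is a placeholder device ID, not a real fixup. */
static void quirk_example_vtd(struct pci_dev *dev)
{
	/* Never apply IOMMU-relaxing quirks to untrusted (external) devices. */
	if (risky_device(dev))
		return;

	/* The chipset-specific workaround would go here. */
	pci_info(dev, "applying example VT-d quirk\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example_vtd);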

@@ -41,6 +41,7 @@
 #define DMA_PTE_SNP		BIT_ULL(11)
 
 #define DMA_FL_PTE_PRESENT	BIT_ULL(0)
+#define DMA_FL_PTE_US		BIT_ULL(2)
 #define DMA_FL_PTE_XD		BIT_ULL(63)
 
 #define ADDR_WIDTH_5LEVEL	(57)
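
As a quick sanity check on the new define, here is a tiny userspace program (an assumption: it is built outside the kernel, so BIT_ULL and the DMA_FL_PTE_* values from the hunk above are re-declared locally) that prints the attribute word __domain_mapping() now builds for first-level page tables:

#include <stdint.h>
#include <stdio.h>

/* Re-declared locally for a userspace build; values match the header hunk above. */
#define BIT_ULL(n)		(1ULL << (n))
#define DMA_FL_PTE_PRESENT	BIT_ULL(0)
#define DMA_FL_PTE_US		BIT_ULL(2)	/* U/S bit, now set by default for first-level tables */
#define DMA_FL_PTE_XD		BIT_ULL(63)	/* execute-disable */

int main(void)
{
	uint64_t attr = DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;

	/* Prints 0x8000000000000005: present + user/supervisor + no-execute. */
	printf("first-level PTE attrs: %#llx\n", (unsigned long long)attr);
	return 0;
}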