IOMMU Fixes for Linux v5.1-rc2

Including:
 
 	- AMD IOMMU fix for sg-mapping with sg->offset > PAGE_SIZE
 
 	- Fix for IOVA code to trigger the slow-path less often
 
 	- Two fixes for Intel VT-d to avoid writing to read-only registers and
 	  to flush the right domain id for the default domains in scalable
 	  mode
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAlyUxMcACgkQK/BELZcB
 GuPcUBAAsRaIicKz2HFsxfkGQawrTBlDCYi3xx8MndZ44ZHWk936CEli5nhSRDmd
 vCZPpl1j0HLfoUzoR4d/KEqYth+96Yh6h52e0WHh0ocIi5oJbIYZ7n8vwLT8lWQG
 X7QkT6Fx8+M3mNfuF1rY4Sa2ZexgeXQeitjip3ULXyvwqg3x7oj7KkRn6IM+hRIv
 BlCZXJNnjVQplWP4w/6j1BE5OtP1yDYCi3exOYO2A+2CIa7VHybOj+Ry2nEPYAAk
 GvZ7kNgQwxov1626QrIp8antqswtTODUqm7x7mdCfoEqX7LPEqKKs578wkHqvIis
 R8GpJxW0RI6OSSXBrRjhOhM2sgQL4csiLiMijZORGnN1jSyurY6Cdjxu24+3Gd0e
 25GIYJgR5U39Uf+g/1CIBAooPoV/sUhOVmAk0HTQvnvx7kXAzdCcIpRcU1H2k0nZ
 XaO/+KCuTnWbhbsdKCUmigtgs+zf1x7fkamiGp9d+rO4PlZn8UnMg6JnS0IuZVE8
 gvWY5orXQx5izb3JJGLD3u21/UKq+nM8+VqN1++piCy9Vu2BPJBvabgAq7iC0Xax
 CfF+5migNNfC1PMgA49LAPAyRf/CwWO1H364IyK2avAeFHCXiPSqWZ/2Urbxp2zc
 u946DsMGDxo6Wamp+s2TzXbpHZ5CbSsjLNE8PhuxDjJXwVpaDOU=
 =wkBN
 -----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-v5.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - AMD IOMMU fix for sg-mapping with sg->offset > PAGE_SIZE

 - Fix for IOVA code to trigger the slow-path less often

 - Two fixes for Intel VT-d to avoid writing to read-only registers and
   to flush the right domain id for the default domains in scalable mode

* tag 'iommu-fixes-v5.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Save the right domain ID used by hardware
  iommu/vt-d: Check capability before disabling protected memory
  iommu/iova: Fix tracking of recently failed iova address
  iommu/amd: fix sg->dma_address for sg->offset bigger than PAGE_SIZE
commit 070c95d457
Linus Torvalds, 2019-03-22 14:10:27 -07:00
3 changed files with 13 additions and 4 deletions

drivers/iommu/amd_iommu.c

@@ -2608,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
 	/* Everything is mapped - write the right values into s->dma_address */
 	for_each_sg(sglist, s, nelems, i) {
-		s->dma_address += address + s->offset;
+		/*
+		 * Add in the remaining piece of the scatter-gather offset that
+		 * was masked out when we were determining the physical address
+		 * via (sg_phys(s) & PAGE_MASK) earlier.
+		 */
+		s->dma_address += address + (s->offset & ~PAGE_MASK);
 		s->dma_length = s->length;
 	}
 
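
The arithmetic matters once sg->offset spans more than one page: map_sg() maps IOVA pages starting from the page-aligned physical address sg_phys(s) & PAGE_MASK, so any full pages contained in the offset are already inside the mapping, and adding the whole offset again lands one or more pages past the data. A minimal user-space sketch of that arithmetic, assuming a 4K page size; sg_page_phys, map_base and the example values are invented for the demo, not kernel API:

/*
 * Sketch only: models the dma_address computation above with plain
 * integers; none of these names are the kernel's.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1UL))

int main(void)
{
	uint64_t sg_page_phys = 0x40000000;        /* page backing the sg entry */
	uint64_t sg_offset    = PAGE_SIZE + 0x388; /* offset > PAGE_SIZE        */
	uint64_t sg_phys      = sg_page_phys + sg_offset;

	/*
	 * The mapping starts at sg_phys & PAGE_MASK, so the full page
	 * contained in sg_offset is already covered by the mapping.
	 */
	uint64_t map_base = 0x80000000;            /* IOVA of that first page   */

	uint64_t buggy = map_base + sg_offset;                /* old code */
	uint64_t fixed = map_base + (sg_offset & ~PAGE_MASK); /* new code */

	/* The data sits at page offset 0x388; buggy is one page too far. */
	printf("phys %#llx -> buggy dma %#llx, fixed dma %#llx\n",
	       (unsigned long long)sg_phys,
	       (unsigned long long)buggy,
	       (unsigned long long)fixed);
	return 0;
}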

drivers/iommu/intel-iommu.c

@@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 	u32 pmen;
 	unsigned long flags;
 
+	if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+		return;
+
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	pmen = readl(iommu->reg + DMAR_PMEN_REG);
 	pmen &= ~DMA_PMEN_EPM;
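
The guard follows the VT-d spec: the protected-memory enable register is only implemented when the Capability Register advertises a protected low- or high-memory region, so writing DMAR_PMEN_REG on hardware without either (or on virtual IOMMUs that expose it read-only) is wrong. A standalone sketch of the pattern, using the PLMR (bit 5) and PHMR (bit 6) positions from the Capability Register; the cap values and printfs stand in for real register access:

#include <stdio.h>
#include <stdint.h>

#define cap_plmr(c) (((c) >> 5) & 1)  /* protected low-memory region  */
#define cap_phmr(c) (((c) >> 6) & 1)  /* protected high-memory region */

static void disable_protect_mem_regions(uint64_t cap)
{
	if (!cap_plmr(cap) && !cap_phmr(cap)) {
		/* PMEN is not implemented; skip the register write */
		printf("cap=%#llx: no PLMR/PHMR, nothing to disable\n",
		       (unsigned long long)cap);
		return;
	}
	printf("cap=%#llx: clear DMA_PMEN_EPM and poll for completion\n",
	       (unsigned long long)cap);
}

int main(void)
{
	disable_protect_mem_regions(0x00);  /* e.g. a virtual IOMMU     */
	disable_protect_mem_regions(0x60);  /* both regions implemented */
	return 0;
}
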
@@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 
 	ctx_lo = context[0].lo;
 
-	sdev->did = domain->iommu_did[iommu->seq_id];
+	sdev->did = FLPT_DEFAULT_DID;
 	sdev->sid = PCI_DEVID(info->bus, info->devfn);
 
 	if (!(ctx_lo & CONTEXT_PASIDE)) {
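
The one-liner matters because IOTLB entries are tagged with a domain id: in scalable mode the hardware walks the default pasid entry under the fixed first-level domain id (FLPT_DEFAULT_DID) rather than the per-IOMMU one, so a flush keyed on the wrong id never hits those entries. A toy model of that failure mode; the tiny TLB and the id value 1 are illustrative, not the driver's data structures:

#include <stdio.h>
#include <stdint.h>

#define FLPT_DEFAULT_DID 1  /* illustrative fixed id for default pasid entries */

struct tlb_entry { uint16_t did; uint64_t iova; int valid; };

static struct tlb_entry tlb[1] = {
	/* entry cached by hardware under the default first-level did */
	{ .did = FLPT_DEFAULT_DID, .iova = 0x1000, .valid = 1 },
};

static void flush_did(uint16_t did)
{
	for (int i = 0; i < 1; i++)
		if (tlb[i].did == did)
			tlb[i].valid = 0;  /* invalidate matching entries */
}

int main(void)
{
	flush_did(42);               /* per-iommu did: entry stays stale */
	printf("after wrong did:  valid=%d\n", tlb[0].valid);

	flush_did(FLPT_DEFAULT_DID); /* id the hardware actually used    */
	printf("after right did: valid=%d\n", tlb[0].valid);
	return 0;
}
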

drivers/iommu/iova.c

@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		curr_iova = rb_entry(curr, struct iova, node);
 	} while (curr && new_pfn <= curr_iova->pfn_hi);
 
-	if (limit_pfn < size || new_pfn < iovad->start_pfn)
+	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+		iovad->max32_alloc_size = size;
 		goto iova32_full;
+	}
 
 	/* pfn_lo will point to size aligned address if size_aligned is set */
 	new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	return 0;
 
 iova32_full:
-	iovad->max32_alloc_size = size;
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return -ENOMEM;
 }
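
Moving iovad->max32_alloc_size into the failing branch is what keeps the 32-bit fast path effective: the early-exit check (size >= max32_alloc_size) also jumps to iova32_full, and setting the field at the label let a large fast-path rejection raise the recorded size, re-enabling the expensive rbtree walk for sizes that had already failed. A toy model of the two behaviours; alloc32() fakes the tree walk with a fixed capacity and is not the kernel's allocator:

#include <stdio.h>

#define CAPACITY 64UL  /* pretend the 32-bit space fits at most 64 pfns */

static unsigned long max32_alloc_size = ~0UL;  /* no failure recorded yet */

static int alloc32(unsigned long size, int buggy)
{
	/* fast path: at least as big as a recently failed request */
	if (size >= max32_alloc_size) {
		if (buggy)
			max32_alloc_size = size;  /* old code: raises the bar */
		printf("size %lu: fast-path reject\n", size);
		return -1;
	}
	if (size > CAPACITY) {            /* the walk itself failed */
		max32_alloc_size = size;  /* fixed code: record only here */
		printf("size %lu: slow-path walk failed\n", size);
		return -1;
	}
	printf("size %lu: allocated\n", size);
	return 0;
}

int main(void)
{
	/* buggy: 200's fast-path reject bumps the limit to 200, so 150
	 * walks the tree again even though 100 already failed */
	alloc32(100, 1);
	alloc32(200, 1);
	alloc32(150, 1);

	max32_alloc_size = ~0UL;  /* reset the model */

	/* fixed: 150 is rejected on the fast path, no walk needed */
	alloc32(100, 0);
	alloc32(200, 0);
	alloc32(150, 0);
	return 0;
}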