mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-28 23:24:50 +00:00
x86/amd-iommu: Add function to complete a tlb flush
This patch adds a function to the AMD IOMMU driver which completes all queued commands on all IOMMUs a specific domain has devices attached to. This is required in a later patch when per-domain flushing is implemented. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
This commit is contained in:
parent
c459611424
commit
0518a3a458
1 changed file with 22 additions and 6 deletions
|
@ -376,6 +376,22 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void iommu_flush_complete(struct protection_domain *domain)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
|
||||||
|
for (i = 0; i < amd_iommus_present; ++i) {
|
||||||
|
if (!domain->dev_iommu[i])
|
||||||
|
continue;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Devices of this domain are behind this IOMMU
|
||||||
|
* We need to wait for completion of all commands.
|
||||||
|
*/
|
||||||
|
iommu_completion_wait(amd_iommus[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Command send function for invalidating a device table entry
|
* Command send function for invalidating a device table entry
|
||||||
*/
|
*/
|
||||||
|
@ -1758,7 +1774,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
|
||||||
if (addr == DMA_ERROR_CODE)
|
if (addr == DMA_ERROR_CODE)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
iommu_completion_wait(iommu);
|
iommu_flush_complete(domain);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
spin_unlock_irqrestore(&domain->lock, flags);
|
spin_unlock_irqrestore(&domain->lock, flags);
|
||||||
|
@ -1791,7 +1807,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
|
||||||
|
|
||||||
__unmap_single(iommu, domain->priv, dma_addr, size, dir);
|
__unmap_single(iommu, domain->priv, dma_addr, size, dir);
|
||||||
|
|
||||||
iommu_completion_wait(iommu);
|
iommu_flush_complete(domain);
|
||||||
|
|
||||||
spin_unlock_irqrestore(&domain->lock, flags);
|
spin_unlock_irqrestore(&domain->lock, flags);
|
||||||
}
|
}
|
||||||
|
@ -1863,7 +1879,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
|
||||||
goto unmap;
|
goto unmap;
|
||||||
}
|
}
|
||||||
|
|
||||||
iommu_completion_wait(iommu);
|
iommu_flush_complete(domain);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
spin_unlock_irqrestore(&domain->lock, flags);
|
spin_unlock_irqrestore(&domain->lock, flags);
|
||||||
|
@ -1914,7 +1930,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
|
||||||
s->dma_address = s->dma_length = 0;
|
s->dma_address = s->dma_length = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
iommu_completion_wait(iommu);
|
iommu_flush_complete(domain);
|
||||||
|
|
||||||
spin_unlock_irqrestore(&domain->lock, flags);
|
spin_unlock_irqrestore(&domain->lock, flags);
|
||||||
}
|
}
|
||||||
|
@ -1969,7 +1985,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
|
||||||
goto out_free;
|
goto out_free;
|
||||||
}
|
}
|
||||||
|
|
||||||
iommu_completion_wait(iommu);
|
iommu_flush_complete(domain);
|
||||||
|
|
||||||
spin_unlock_irqrestore(&domain->lock, flags);
|
spin_unlock_irqrestore(&domain->lock, flags);
|
||||||
|
|
||||||
|
@ -2010,7 +2026,7 @@ static void free_coherent(struct device *dev, size_t size,
|
||||||
|
|
||||||
__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
|
__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
iommu_completion_wait(iommu);
|
iommu_flush_complete(domain);
|
||||||
|
|
||||||
spin_unlock_irqrestore(&domain->lock, flags);
|
spin_unlock_irqrestore(&domain->lock, flags);
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue