mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-12 21:57:43 +00:00
x86, AMD IOMMU: add mapping functions for coherent mappings
This patch adds the dma_ops functions for coherent mappings. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Cc: iommu@lists.linux-foundation.org Cc: bhavna.sarathy@amd.com Cc: Sebastian.Biemueller@amd.com Cc: robert.richter@amd.com Cc: joro@8bytes.org Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
65b050adbf
commit
5d8b53cf3f
1 changed file with 74 additions and 0 deletions
|
@ -798,3 +798,77 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
|
||||||
spin_unlock_irqrestore(&domain->lock, flags);
|
spin_unlock_irqrestore(&domain->lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void *alloc_coherent(struct device *dev, size_t size,
|
||||||
|
dma_addr_t *dma_addr, gfp_t flag)
|
||||||
|
{
|
||||||
|
unsigned long flags;
|
||||||
|
void *virt_addr;
|
||||||
|
struct amd_iommu *iommu;
|
||||||
|
struct protection_domain *domain;
|
||||||
|
u16 devid;
|
||||||
|
phys_addr_t paddr;
|
||||||
|
|
||||||
|
virt_addr = (void *)__get_free_pages(flag, get_order(size));
|
||||||
|
if (!virt_addr)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
memset(virt_addr, 0, size);
|
||||||
|
paddr = virt_to_phys(virt_addr);
|
||||||
|
|
||||||
|
get_device_resources(dev, &iommu, &domain, &devid);
|
||||||
|
|
||||||
|
if (!iommu || !domain) {
|
||||||
|
*dma_addr = (dma_addr_t)paddr;
|
||||||
|
return virt_addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
spin_lock_irqsave(&domain->lock, flags);
|
||||||
|
|
||||||
|
*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
|
||||||
|
size, DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
|
if (*dma_addr == bad_dma_address) {
|
||||||
|
free_pages((unsigned long)virt_addr, get_order(size));
|
||||||
|
virt_addr = NULL;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (iommu_has_npcache(iommu))
|
||||||
|
iommu_flush_pages(iommu, domain->id, *dma_addr, size);
|
||||||
|
|
||||||
|
if (iommu->need_sync)
|
||||||
|
iommu_completion_wait(iommu);
|
||||||
|
|
||||||
|
out:
|
||||||
|
spin_unlock_irqrestore(&domain->lock, flags);
|
||||||
|
|
||||||
|
return virt_addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
 * The exported free_coherent function for dma_ops: tears down the
 * IOMMU mapping created by alloc_coherent (when the device has a
 * protection domain) and releases the pages backing the buffer.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	struct protection_domain *domain;
	struct amd_iommu *iommu;
	unsigned long flags;
	u16 devid;

	get_device_resources(dev, &iommu, &domain, &devid);

	/* only devices behind an IOMMU have a mapping to undo */
	if (iommu && domain) {
		spin_lock_irqsave(&domain->lock, flags);

		__unmap_single(iommu, domain->priv, dma_addr, size,
			       DMA_BIDIRECTIONAL);
		iommu_flush_pages(iommu, domain->id, dma_addr, size);

		if (iommu->need_sync)
			iommu_completion_wait(iommu);

		spin_unlock_irqrestore(&domain->lock, flags);
	}

	free_pages((unsigned long)virt_addr, get_order(size));
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue