Merge branch 'for-next/iommu/vt-d' into for-next/iommu/core

Intel VT-D updates for 5.11. The main thing here is converting the code
over to the iommu-dma API, which required some improvements to the core
code to preserve existing functionality.

* for-next/iommu/vt-d:
  iommu/vt-d: Avoid GFP_ATOMIC where it is not needed
  iommu/vt-d: Remove set but not used variable
  iommu/vt-d: Cleanup after converting to dma-iommu ops
  iommu/vt-d: Convert intel iommu driver to the iommu ops
  iommu/vt-d: Update domain geometry in iommu_ops.at(de)tach_dev
  iommu: Add quirk for Intel graphic devices in map_sg
  iommu: Allow the dma-iommu api to use bounce buffers
  iommu: Add iommu_dma_free_cpu_cached_iovas()
  iommu: Handle freelists when using deferred flushing in iommu drivers
  iommu/vt-d: include conditionally on CONFIG_INTEL_IOMMU_SVM
Will Deacon 2020-12-08 15:11:58 +00:00
commit 113eb4ce4f
9 changed files with 351 additions and 834 deletions
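For readers skimming the diffs below: the bounce-buffer work is transparent to device drivers, which keep using the ordinary DMA API. When a device is untrusted (for example an external Thunderbolt device) and a buffer is not aligned to the IOVA granule, dma-iommu now bounces the data through swiotlb internally; trusted devices are mapped directly as before. A minimal, hypothetical driver-side sketch (the function and variable names below are invented for illustration; only the DMA API calls are real):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical example, not part of this series: the driver side does not
 * change. Whether the buffer is bounced through swiotlb is decided inside
 * dma-iommu, based on the device's 'untrusted' flag and the buffer alignment.
 */
static int example_dma_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand 'dma' to the device and wait for the transfer ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}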

View File

@@ -1883,11 +1883,6 @@
 			Note that using this option lowers the security
 			provided by tboot because it makes the system
 			vulnerable to DMA attacks.
-		nobounce [Default off]
-			Disable bounce buffer for untrusted devices such as
-			the Thunderbolt devices. This will treat the untrusted
-			devices as the trusted ones, hence might expose security
-			risks of DMA attacks.
 
 	intel_idle.max_cstate=	[KNL,HW,ACPI,X86]
 			0	disables intel_idle and fall back on acpi_idle.

View File

@@ -20,9 +20,11 @@
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
+#include <linux/swiotlb.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
 #include <linux/crash_dump.h>
+#include <linux/dma-direct.h>
 
 struct iommu_dma_msi_page {
 	struct list_head	list;
@@ -49,6 +51,27 @@ struct iommu_dma_cookie {
 	struct iommu_domain		*fq_domain;
 };
 
+void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+		struct iommu_domain *domain)
+{
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+
+	free_cpu_cached_iovas(cpu, iovad);
+}
+
+static void iommu_dma_entry_dtor(unsigned long data)
+{
+	struct page *freelist = (struct page *)data;
+
+	while (freelist) {
+		unsigned long p = (unsigned long)page_address(freelist);
+
+		freelist = freelist->freelist;
+		free_page(p);
+	}
+}
+
 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
 {
 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
@@ -343,7 +366,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
 			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
 		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
-					NULL))
+					iommu_dma_entry_dtor))
 			pr_warn("iova flush queue initialization failed\n");
 		else
 			cookie->fq_domain = domain;
@@ -440,7 +463,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 }
 
 static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
-		dma_addr_t iova, size_t size)
+		dma_addr_t iova, size_t size, struct page *freelist)
 {
 	struct iova_domain *iovad = &cookie->iovad;
@@ -449,7 +472,8 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 		cookie->msi_iova -= size;
 	else if (cookie->fq_domain)	/* non-strict mode */
 		queue_iova(iovad, iova_pfn(iovad, iova),
-				size >> iova_shift(iovad), 0);
+				size >> iova_shift(iovad),
+				(unsigned long)freelist);
 	else
 		free_iova_fast(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad));
@@ -474,7 +498,32 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 	if (!cookie->fq_domain)
 		iommu_iotlb_sync(domain, &iotlb_gather);
 
-	iommu_dma_free_iova(cookie, dma_addr, size);
+	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
+}
+
+static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	phys_addr_t phys;
+
+	phys = iommu_iova_to_phys(domain, dma_addr);
+	if (WARN_ON(!phys))
+		return;
+
+	__iommu_dma_unmap(dev, dma_addr, size);
+
+	if (unlikely(is_swiotlb_buffer(phys)))
+		swiotlb_tbl_unmap_single(dev, phys, size,
+				iova_align(iovad, size), dir, attrs);
+}
+
+static bool dev_is_untrusted(struct device *dev)
+{
+	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -496,12 +545,60 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 		return DMA_MAPPING_ERROR;
 
 	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
-		iommu_dma_free_iova(cookie, iova, size);
+		iommu_dma_free_iova(cookie, iova, size, NULL);
 		return DMA_MAPPING_ERROR;
 	}
 	return iova + iova_off;
 }
 
+static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+		size_t org_size, dma_addr_t dma_mask, bool coherent,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	int prot = dma_info_to_prot(dir, coherent, attrs);
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t aligned_size = org_size;
+	void *padding_start;
+	size_t padding_size;
+	dma_addr_t iova;
+
+	/*
+	 * If both the physical buffer start address and size are
+	 * page aligned, we don't need to use a bounce page.
+	 */
+	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
+	    iova_offset(iovad, phys | org_size)) {
+		aligned_size = iova_align(iovad, org_size);
+		phys = swiotlb_tbl_map_single(dev, phys, org_size,
+					      aligned_size, dir, attrs);
+
+		if (phys == DMA_MAPPING_ERROR)
+			return DMA_MAPPING_ERROR;
+
+		/* Cleanup the padding area. */
+		padding_start = phys_to_virt(phys);
+		padding_size = aligned_size;
+
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+		    (dir == DMA_TO_DEVICE ||
+		     dir == DMA_BIDIRECTIONAL)) {
+			padding_start += org_size;
+			padding_size -= org_size;
+		}
+
+		memset(padding_start, 0, padding_size);
+	}
+
+	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
+	if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
+		swiotlb_tbl_unmap_single(dev, phys, org_size,
+					 aligned_size, dir, attrs);
+	return iova;
+}
+
 static void __iommu_dma_free_pages(struct page **pages, int count)
 {
 	while (count--)
@@ -649,7 +746,7 @@ out_unmap:
 out_free_sg:
 	sg_free_table(&sgt);
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size);
+	iommu_dma_free_iova(cookie, iova, size, NULL);
 out_free_pages:
 	__iommu_dma_free_pages(pages, count);
 	return NULL;
@@ -675,11 +772,15 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_cpu(phys, size, dir);
+	if (!dev_is_dma_coherent(dev))
+		arch_sync_dma_for_cpu(phys, size, dir);
+
+	if (is_swiotlb_buffer(phys))
+		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
 }
 
 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -687,11 +788,15 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_device(phys, size, dir);
+	if (is_swiotlb_buffer(phys))
+		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
+
+	if (!dev_is_dma_coherent(dev))
+		arch_sync_dma_for_device(phys, size, dir);
 }
 
 static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -701,11 +806,17 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
-	for_each_sg(sgl, sg, nelems, i)
-		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+	for_each_sg(sgl, sg, nelems, i) {
+		if (!dev_is_dma_coherent(dev))
+			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+
+		if (is_swiotlb_buffer(sg_phys(sg)))
+			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+						dir, SYNC_FOR_CPU);
+	}
 }
 
 static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -715,11 +826,17 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
-	for_each_sg(sgl, sg, nelems, i)
-		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+	for_each_sg(sgl, sg, nelems, i) {
+		if (is_swiotlb_buffer(sg_phys(sg)))
+			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+						dir, SYNC_FOR_DEVICE);
+
+		if (!dev_is_dma_coherent(dev))
+			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+	}
 }
 
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -728,10 +845,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
-	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dma_handle;
 
-	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
+	dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
+			coherent, dir, attrs);
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(phys, size, dir);
@@ -743,7 +860,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 {
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-	__iommu_dma_unmap(dev, dma_handle, size);
+	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
 }
 
 /*
@@ -761,6 +878,33 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
 	int i, count = 0;
 
+	/*
+	 * The Intel graphic driver is used to assume that the returned
+	 * sg list is not combound. This blocks the efforts of converting
+	 * Intel IOMMU driver to dma-iommu api's. Add this quirk to make the
+	 * device driver work and should be removed once it's fixed in i915
+	 * driver.
+	 */
+	if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
+	    to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
+	    (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+		for_each_sg(sg, s, nents, i) {
+			unsigned int s_iova_off = sg_dma_address(s);
+			unsigned int s_length = sg_dma_len(s);
+			unsigned int s_iova_len = s->length;
+
+			s->offset += s_iova_off;
+			s->length = s_length;
+			sg_dma_address(s) = dma_addr + s_iova_off;
+			sg_dma_len(s) = s_length;
+			dma_addr += s_iova_len;
+
+			pr_info_once("sg combining disabled due to i915 driver\n");
+		}
+
+		return nents;
+	}
+
 	for_each_sg(sg, s, nents, i) {
 		/* Restore this segment's original unaligned fields first */
 		unsigned int s_iova_off = sg_dma_address(s);
@@ -821,6 +965,39 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 	}
 }
 
+static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		__iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
+				sg_dma_len(s), dir, attrs);
+}
+
+static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
+				s->length, dma_get_mask(dev),
+				dev_is_dma_coherent(dev), dir, attrs);
+		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
+			goto out_unmap;
+		sg_dma_len(s) = s->length;
+	}
+
+	return nents;
+
+out_unmap:
+	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+	return 0;
+}
+
 /*
  * The DMA API client is passing in a scatterlist which could describe
  * any old buffer layout, but the IOMMU API requires everything to be
@@ -847,6 +1024,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
 
+	if (dev_is_untrusted(dev))
+		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
+
 	/*
 	 * Work out how much IOVA space we need, and align the segments to
 	 * IOVA granules for the IOMMU driver to handle. With some clever
@@ -900,7 +1080,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	return __finalise_sg(dev, sg, nents, iova);
 
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, iova_len);
+	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
 	return 0;
@@ -916,6 +1096,11 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 
+	if (dev_is_untrusted(dev)) {
+		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
+		return;
+	}
+
 	/*
 	 * The scatterlist segments are mapped into a single
 	 * contiguous IOVA allocation, so this is incredibly easy.
@@ -1228,7 +1413,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	return msi_page;
 
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size);
+	iommu_dma_free_iova(cookie, iova, size, NULL);
 out_free_page:
 	kfree(msi_page);
 	return NULL;

View File

@@ -13,6 +13,7 @@ config INTEL_IOMMU
 	select DMAR_TABLE
 	select SWIOTLB
 	select IOASID
+	select IOMMU_DMA
 	help
 	  DMA remapping (DMAR) devices support enables independent address
 	  translations for Direct Memory Access (DMA) from devices.

File diff suppressed because it is too large

View File

@@ -395,8 +395,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
-				     phys, size, size, dir, attrs);
+	map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;

View File

@@ -37,6 +37,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
 
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
+void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+		struct iommu_domain *domain);
+
 #else /* CONFIG_IOMMU_DMA */
 
 struct iommu_domain;
@@ -78,5 +81,10 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
 {
 }
 
+static inline void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+		struct iommu_domain *domain)
+{
+}
+
 #endif /* CONFIG_IOMMU_DMA */
 #endif /* __DMA_IOMMU_H */

View File

@@ -181,6 +181,7 @@ struct iommu_iotlb_gather {
 	unsigned long		start;
 	unsigned long		end;
 	size_t			pgsize;
+	struct page		*freelist;
 };
 
 /**

View File

@@ -45,13 +45,9 @@ enum dma_sync_target {
 	SYNC_FOR_DEVICE = 1,
 };
 
-extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-					  dma_addr_t tbl_dma_addr,
-					  phys_addr_t phys,
-					  size_t mapping_size,
-					  size_t alloc_size,
-					  enum dma_data_direction dir,
-					  unsigned long attrs);
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+		size_t mapping_size, size_t alloc_size,
+		enum dma_data_direction dir, unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,

View File

@@ -229,6 +229,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
 	}
 	io_tlb_index = 0;
+	no_iotlb_memory = false;
 
 	if (verbose)
 		swiotlb_print_info();
@@ -260,9 +261,11 @@ swiotlb_init(int verbose)
 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
 		return;
 
-	if (io_tlb_start)
+	if (io_tlb_start) {
 		memblock_free_early(io_tlb_start,
 				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+		io_tlb_start = 0;
+	}
 	pr_warn("Cannot allocate buffer");
 	no_iotlb_memory = true;
 }
@@ -360,6 +363,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
 	}
 	io_tlb_index = 0;
+	no_iotlb_memory = false;
 
 	swiotlb_print_info();
@@ -441,14 +445,11 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 	}
 }
 
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-				   dma_addr_t tbl_dma_addr,
-				   phys_addr_t orig_addr,
-				   size_t mapping_size,
-				   size_t alloc_size,
-				   enum dma_data_direction dir,
-				   unsigned long attrs)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
+		size_t mapping_size, size_t alloc_size,
+		enum dma_data_direction dir, unsigned long attrs)
 {
+	dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(hwdev, io_tlb_start);
 	unsigned long flags;
 	phys_addr_t tlb_addr;
 	unsigned int nslots, stride, index, wrap;
@@ -667,9 +668,8 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
 			      swiotlb_force);
 
-	swiotlb_addr = swiotlb_tbl_map_single(dev,
-			phys_to_dma_unencrypted(dev, io_tlb_start),
-			paddr, size, size, dir, attrs);
+	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
+			attrs);
 	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;