dma-mapping updates for 5.5-rc1

  - improve dma-debug scalability (Eric Dumazet)
  - tiny dma-debug cleanup (Dan Carpenter)
  - check for vmap memory in dma_map_single (Kees Cook)
  - check for dma_addr_t overflows in dma-direct when using
    DMA offsets (Nicolas Saenz Julienne)
  - switch the x86 sta2x11 SOC to use more generic DMA code
    (Nicolas Saenz Julienne)
  - fix arm-nommu dma-ranges handling (Vladimir Murzin)
  - use __initdata in CMA (Shyam Saini)
  - replace the bus dma mask with a limit (Nicolas Saenz Julienne)
  - merge the remapping helpers into the main dma-direct flow (me)
  - switch xtensa to the generic dma remap handling (me)
  - various cleanups around dma_capable (me)
  - remove unused dev arguments to various dma-noncoherent helpers (me)
 -----BEGIN PGP SIGNATURE-----
 
 iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAl3f+eULHGhjaEBsc3Qu
 ZGUACgkQD55TZVIEUYPyPg/+PVHCrhmepudQQFHu6wfurE5U77iNnoUifvG+b5z5
 5mHmTMkQwyox6rKDe8NuFApAhz1VJDSUgSelPmvTSOIEIGXCvX1p+GqRSVS5YQON
 aLzGvbWKE8hCpaPdDHKYDauD1FZGMM8L2P5oOMF9X9fQ94xxRqfqJM6c8iD16Sgg
 +aOgPNzTnxQHJFF/Dbt/mjJrKXWI+XF+bgUbH+l9yKa7Dd7ibmJR8yl9hs1jmp0H
 1CZ+CizwnAs57rCd1a6Ybc6gj59tySc03NMnnbTko+KDxrcbD3Ee2tpqHVkkCjYz
 Yl0m4FIpbotrpokL/FIS727bVvkjbWgoeM+kiVPoYzmZea3pq/tFDr6tp/BxDhFj
 TZXSFfgQljlYMD3ppSoklFlfjGriVWV0tPO3arPXwuuMF5EX/IMQmvxei05jpc8n
 iELNXOP9iZZkY4tLHy2hn2uWrxBRrS1WQwlLg9hahlNRzyfFSyHeP0zWlVDt+RgF
 5CCbEI+HQcUqg1FApB30lQNWTn1+dJftrpKVBlgNBIyIa/z2rFbt8GdSnItxjfQX
 /XX8EZbFvF6AcXkgURkYFIoKM/EbYShOSLcYA3PTUtcuTnF6Kk5eimySiGWZTVCS
 prruSFDZJOvL3SnOIMIiYVmBdB7lEbDyLI/VYuhoECXEDCJpVmRktNkJNg4q6/E+
 fjQ=
 =e5wO
 -----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.5' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - improve dma-debug scalability (Eric Dumazet)

 - tiny dma-debug cleanup (Dan Carpenter)

 - check for vmap memory in dma_map_single (Kees Cook)

 - check for dma_addr_t overflows in dma-direct when using DMA offsets
   (Nicolas Saenz Julienne)

 - switch the x86 sta2x11 SOC to use more generic DMA code (Nicolas
   Saenz Julienne)

 - fix arm-nommu dma-ranges handling (Vladimir Murzin)

 - use __initdata in CMA (Shyam Saini)

 - replace the bus dma mask with a limit (Nicolas Saenz Julienne)

 - merge the remapping helpers into the main dma-direct flow (me)

 - switch xtensa to the generic dma remap handling (me)

 - various cleanups around dma_capable (me)

 - remove unused dev arguments to various dma-noncoherent helpers (me)
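
To make the mask-to-limit change above concrete, here is a minimal sketch
(hypothetical bus setup code, not taken from this series): the old
bus_dma_mask field was a mask and so could only describe windows ending on a
2^n - 1 boundary, while the new bus_dma_limit is an inclusive end address.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Sketch: a bus driver describing an upstream DMA window that ends at
 * an arbitrary address. 'base' and 'size' are assumed to come from bus
 * probe code; the function name is hypothetical.
 */
static void example_set_bus_window(struct device *dev, u64 base, u64 size)
{
        /*
         * bus_dma_limit is an inclusive upper bound; dma_capable()
         * checks it as min_not_zero(*dev->dma_mask, dev->bus_dma_limit).
         */
        dev->bus_dma_limit = base + size - 1;
}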

* tag 'dma-mapping-5.5' of git://git.infradead.org/users/hch/dma-mapping: (22 commits)
  dma-mapping: treat dev->bus_dma_mask as a DMA limit
  dma-direct: exclude dma_direct_map_resource from the min_low_pfn check
  dma-direct: don't check swiotlb=force in dma_direct_map_resource
  dma-debug: clean up put_hash_bucket()
  powerpc: remove support for NULL dev in __phys_to_dma / __dma_to_phys
  dma-direct: avoid a forward declaration for phys_to_dma
  dma-direct: unify the dma_capable definitions
  dma-mapping: drop the dev argument to arch_sync_dma_for_*
  x86/PCI: sta2x11: use default DMA address translation
  dma-direct: check for overflows on 32 bit DMA addresses
  dma-debug: increase HASH_SIZE
  dma-debug: reorder struct dma_debug_entry fields
  xtensa: use the generic uncached segment support
  dma-mapping: merge the generic remapping helpers into dma-direct
  dma-direct: provide mmap and get_sgtable method overrides
  dma-direct: remove the dma_handle argument to __dma_direct_alloc_pages
  dma-direct: remove __dma_direct_free_pages
  usb: core: Remove redundant vmap checks
  kernel: dma-contiguous: mark CMA parameters __initdata/__initconst
  dma-debug: add a schedule point in debug_dma_dump_mappings()
  ...
commit 81b6b96475
Author: Linus Torvalds
Date:   2019-11-28 11:16:43 -08:00

64 changed files with 405 additions and 672 deletions


@ -6,7 +6,6 @@
config ARC
def_bool y
select ARC_TIMERS
select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SETUP_DMA_OPS


@ -48,8 +48,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
* upper layer functions (in include/linux/dma-mapping.h)
*/
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
@ -69,8 +69,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:


@ -7,7 +7,6 @@ config ARM
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE


@ -14,23 +14,4 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
u64 limit, mask;
if (!dev->dma_mask)
return 0;
mask = *dev->dma_mask;
limit = (mask + 1) & ~mask;
if (limit && size > limit)
return 0;
if ((addr | (addr + size - 1)) & ~mask)
return 0;
return 1;
}
#endif /* ASM_ARM_DMA_DIRECT_H */


@ -35,7 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
unsigned long attrs)
{
void *ret = dma_alloc_from_global_coherent(size, dma_handle);
void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
/*
* dma_alloc_from_global_coherent() may fail because:


@ -2332,26 +2332,20 @@ void arch_teardown_dma_ops(struct device *dev)
}
#ifdef CONFIG_SWIOTLB
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
size, dir);
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
size, dir);
}
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
dma_addr_t dma_addr)
{
return dma_to_pfn(dev, dma_addr);
}
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{


@ -71,20 +71,20 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
* pfn_valid returns true the pages is local and we can use the native
* dma-direct functions, otherwise we call the Xen specific version.
*/
void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
phys_addr_t paddr, size_t size, enum dma_data_direction dir)
void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
if (pfn_valid(PFN_DOWN(handle)))
arch_sync_dma_for_cpu(dev, paddr, size, dir);
arch_sync_dma_for_cpu(paddr, size, dir);
else if (dir != DMA_TO_DEVICE)
dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
}
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
phys_addr_t paddr, size_t size, enum dma_data_direction dir)
void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
if (pfn_valid(PFN_DOWN(handle)))
arch_sync_dma_for_device(dev, paddr, size, dir);
arch_sync_dma_for_device(paddr, size, dir);
else if (dir == DMA_FROM_DEVICE)
dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
else


@ -12,7 +12,6 @@ config ARM64
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_FAST_MULTIPLIER


@ -13,14 +13,14 @@
#include <asm/cacheflush.h>
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
__dma_map_area(phys_to_virt(paddr), size, dir);
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
__dma_unmap_area(phys_to_virt(paddr), size, dir);
}


@ -140,7 +140,7 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
sizeof(long));
}
static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
static void c6x_dma_sync(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
BUG_ON(!valid_dma_direction(dir));
@ -160,14 +160,14 @@ static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
}
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
return c6x_dma_sync(dev, paddr, size, dir);
return c6x_dma_sync(paddr, size, dir);
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
return c6x_dma_sync(dev, paddr, size, dir);
return c6x_dma_sync(paddr, size, dir);
}


@ -58,8 +58,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
@ -74,8 +74,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:


@ -55,8 +55,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
void *addr = phys_to_virt(paddr);


@ -33,7 +33,7 @@ config IA64
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
select ARCH_HAS_DMA_COHERENT_TO_PFN
select DMA_NONCOHERENT_MMAP
select ARCH_HAS_SYNC_DMA_FOR_CPU
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE


@ -19,9 +19,3 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
{
dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
dma_addr_t dma_addr)
{
return page_to_pfn(virt_to_page(cpu_addr));
}


@ -73,8 +73,8 @@ __ia64_sync_icache_dcache (pte_t pte)
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
unsigned long pfn = PHYS_PFN(paddr);


@ -61,8 +61,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:


@ -4,7 +4,6 @@ config MICROBLAZE
select ARCH_32BIT_OFF_T
select ARCH_NO_SWAP
select ARCH_HAS_BINFMT_FLAT if !MMU
select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU


@ -15,7 +15,7 @@
#include <linux/bug.h>
#include <asm/cacheflush.h>
static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
static void __dma_sync(phys_addr_t paddr, size_t size,
enum dma_data_direction direction)
{
switch (direction) {
@ -31,14 +31,14 @@ static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
}
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
__dma_sync(dev, paddr, size, dir);
__dma_sync(paddr, size, dir);
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
__dma_sync(dev, paddr, size, dir);
__dma_sync(paddr, size, dir);
}


@ -1194,9 +1194,9 @@ config DMA_NONCOHERENT
select ARCH_HAS_DMA_WRITE_COMBINE
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_UNCACHED_SEGMENT
select NEED_DMA_MAP_STATE
select ARCH_HAS_DMA_COHERENT_TO_PFN
select DMA_NONCOHERENT_MMAP
select DMA_NONCOHERENT_CACHE_SYNC
select NEED_DMA_MAP_STATE
config SYS_HAS_EARLY_PRINTK
bool


@ -64,7 +64,7 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
return dma_addr;
}
void arch_sync_dma_for_cpu_all(struct device *dev)
void arch_sync_dma_for_cpu_all(void)
{
void __iomem *cbr = BMIPS_GET_CBR();
u32 cfg;


@ -2,14 +2,6 @@
#ifndef _MIPS_DMA_DIRECT_H
#define _MIPS_DMA_DIRECT_H 1
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
return false;
return addr + size - 1 <= *dev->dma_mask;
}
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);


@ -592,7 +592,7 @@ static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
phys_addr_t phys = page_to_phys(page) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_device(dev, phys, size, dir);
arch_sync_dma_for_device(phys, size, dir);
return vdma_alloc(phys, size);
}
@ -600,7 +600,7 @@ static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
vdma_free(dma_addr);
}
@ -612,7 +612,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
arch_sync_dma_for_device(sg_phys(sg), sg->length,
dir);
sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
if (sg->dma_address == DMA_MAPPING_ERROR)
@ -631,8 +631,7 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
dir);
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
vdma_free(sg->dma_address);
}
}
@ -640,13 +639,13 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
static void jazz_dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
}
static void jazz_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
}
static void jazz_dma_sync_sg_for_device(struct device *dev,
@ -656,7 +655,7 @@ static void jazz_dma_sync_sg_for_device(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i)
arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static void jazz_dma_sync_sg_for_cpu(struct device *dev,
@ -666,7 +665,7 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i)
arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
const struct dma_map_ops jazz_dma_ops = {


@ -27,7 +27,7 @@
* R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
* SGI IP32 aka O2.
*/
static inline bool cpu_needs_post_dma_flush(struct device *dev)
static inline bool cpu_needs_post_dma_flush(void)
{
switch (boot_cpu_type()) {
case CPU_R10000:
@ -59,12 +59,6 @@ void *cached_kernel_address(void *addr)
return __va(addr) - UNCAC_BASE;
}
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
dma_addr_t dma_addr)
{
return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr)));
}
static inline void dma_sync_virt(void *addr, size_t size,
enum dma_data_direction dir)
{
@ -118,17 +112,17 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
} while (left);
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
dma_sync_phys(paddr, size, dir);
}
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
if (cpu_needs_post_dma_flush(dev))
if (cpu_needs_post_dma_flush())
dma_sync_phys(paddr, size, dir);
}
#endif


@ -21,22 +21,22 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
/*
* The BCM1250, etc. PCI host bridge does not support DAC on its 32-bit
* bus, so we set the bus's DMA mask accordingly. However the HT link
* bus, so we set the bus's DMA limit accordingly. However the HT link
* down the artificial PCI-HT bridge supports 40-bit addressing and the
* SP1011 HT-PCI bridge downstream supports both DAC and a 64-bit bus
* width, so we record the PCI-HT bridge's secondary and subordinate bus
* numbers and do not set the mask for devices present in the inclusive
* numbers and do not set the limit for devices present in the inclusive
* range of those.
*/
struct sb1250_bus_dma_mask_exclude {
struct sb1250_bus_dma_limit_exclude {
bool set;
unsigned char start;
unsigned char end;
};
static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data)
static int sb1250_bus_dma_limit(struct pci_dev *dev, void *data)
{
struct sb1250_bus_dma_mask_exclude *exclude = data;
struct sb1250_bus_dma_limit_exclude *exclude = data;
bool exclude_this;
bool ht_bridge;
@ -55,7 +55,7 @@ static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data)
exclude->start, exclude->end);
} else {
dev_dbg(&dev->dev, "disabling DAC for device");
dev->dev.bus_dma_mask = DMA_BIT_MASK(32);
dev->dev.bus_dma_limit = DMA_BIT_MASK(32);
}
return 0;
@ -63,9 +63,9 @@ static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data)
static void quirk_sb1250_pci_dac(struct pci_dev *dev)
{
struct sb1250_bus_dma_mask_exclude exclude = { .set = false };
struct sb1250_bus_dma_limit_exclude exclude = { .set = false };
pci_walk_bus(dev->bus, sb1250_bus_dma_mask, &exclude);
pci_walk_bus(dev->bus, sb1250_bus_dma_limit, &exclude);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
quirk_sb1250_pci_dac);


@ -46,8 +46,8 @@ static inline void cache_op(phys_addr_t paddr, size_t size,
} while (left);
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_FROM_DEVICE:
@ -61,8 +61,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:


@ -18,8 +18,8 @@
#include <linux/cache.h>
#include <asm/cacheflush.h>
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
void *vaddr = phys_to_virt(paddr);
@ -42,8 +42,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
void *vaddr = phys_to_virt(paddr);


@ -125,7 +125,7 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
free_pages_exact(vaddr, size);
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
enum dma_data_direction dir)
{
unsigned long cl;


@ -439,14 +439,14 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
free_pages((unsigned long)__va(dma_handle), order);
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}


@ -2,26 +2,13 @@
#ifndef ASM_POWERPC_DMA_DIRECT_H
#define ASM_POWERPC_DMA_DIRECT_H 1
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
return false;
return addr + size - 1 <=
min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
if (!dev)
return paddr + PCI_DRAM_OFFSET;
return paddr + dev->archdata.dma_offset;
}
static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
if (!dev)
return daddr - PCI_DRAM_OFFSET;
return daddr - dev->archdata.dma_offset;
}
#endif /* ASM_POWERPC_DMA_DIRECT_H */


@ -104,14 +104,14 @@ static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
#endif
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
__dma_sync_page(paddr, size, dir);
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
__dma_sync_page(paddr, size, dir);
}


@ -459,7 +459,6 @@ config NOT_COHERENT_CACHE
bool
depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \
GAMECUBE_COMMON || AMIGAONE
select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_SYNC_DMA_FOR_CPU


@ -115,8 +115,8 @@ static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
pdev->dev.bus_dma_mask =
hose->dma_window_base_cur + hose->dma_window_size;
pdev->dev.bus_dma_limit =
hose->dma_window_base_cur + hose->dma_window_size - 1;
}
static void setup_swiotlb_ops(struct pci_controller *hose)
@ -135,7 +135,7 @@ static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
* mapping that allows addressing any RAM address from across PCI.
*/
if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
dev->bus_dma_mask = 0;
dev->bus_dma_limit = 0;
dev->archdata.dma_offset = pci64_dma_offset;
}
}


@ -25,7 +25,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
* Pages from the page allocator may have data present in
* cache. So flush the cache before using uncached memory.
*/
arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
arch_sync_dma_for_device(virt_to_phys(ret), size,
DMA_BIDIRECTIONAL);
ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
@ -59,8 +59,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
iounmap(vaddr);
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));


@ -366,8 +366,8 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
/* IIep is write-through, not flushing on cpu to device transfer. */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE)
dma_make_coherent(paddr, PAGE_ALIGN(size));


@ -708,7 +708,6 @@ config X86_SUPPORTS_MEMORY_FAILURE
config STA2X11
bool "STA2X11 Companion Chip Support"
depends on X86_32_NON_STANDARD && PCI
select ARCH_HAS_PHYS_TO_DMA
select SWIOTLB
select MFD_STA2X11
select GPIOLIB


@ -6,9 +6,6 @@ struct dev_archdata {
#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
void *iommu; /* hook for IOMMU specific extension */
#endif
#ifdef CONFIG_STA2X11
bool is_sta2x11;
#endif
};
#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)


@ -1,9 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_X86_DMA_DIRECT_H
#define ASM_X86_DMA_DIRECT_H 1
bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
#endif /* ASM_X86_DMA_DIRECT_H */


@ -185,13 +185,13 @@ static void iommu_full(struct device *dev, size_t size, int dir)
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
return force_iommu || !dma_capable(dev, addr, size);
return force_iommu || !dma_capable(dev, addr, size, true);
}
static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
return !dma_capable(dev, addr, size);
return !dma_capable(dev, addr, size, true);
}
/* Map a single continuous physical area into the IOMMU.


@ -140,7 +140,7 @@ rootfs_initcall(pci_iommu_init);
static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
pdev->dev.bus_dma_mask = DMA_BIT_MASK(32);
pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
return 0;
}


@ -367,7 +367,7 @@ bool force_dma_unencrypted(struct device *dev)
if (sme_active()) {
u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
dev->bus_dma_mask);
dev->bus_dma_limit);
if (dma_dev_mask <= dma_enc_mask)
return true;


@ -30,7 +30,6 @@ struct sta2x11_ahb_regs { /* saved during suspend */
};
struct sta2x11_mapping {
u32 amba_base;
int is_suspended;
struct sta2x11_ahb_regs regs[STA2X11_NR_FUNCS];
};
@ -92,18 +91,6 @@ static int sta2x11_pdev_to_ep(struct pci_dev *pdev)
return pdev->bus->number - instance->bus0;
}
static struct sta2x11_mapping *sta2x11_pdev_to_mapping(struct pci_dev *pdev)
{
struct sta2x11_instance *instance;
int ep;
instance = sta2x11_pdev_to_instance(pdev);
if (!instance)
return NULL;
ep = sta2x11_pdev_to_ep(pdev);
return instance->map + ep;
}
/* This is exported, as some devices need to access the MFD registers */
struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev)
{
@ -111,39 +98,6 @@ struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev)
}
EXPORT_SYMBOL(sta2x11_get_instance);
/**
* p2a - Translate physical address to STA2x11 AMBA address,
* used for DMA transfers to STA2x11
* @p: Physical address
* @pdev: PCI device (must be hosted within the connext)
*/
static dma_addr_t p2a(dma_addr_t p, struct pci_dev *pdev)
{
struct sta2x11_mapping *map;
dma_addr_t a;
map = sta2x11_pdev_to_mapping(pdev);
a = p + map->amba_base;
return a;
}
/**
* a2p - Translate STA2x11 AMBA address to physical address
* used for DMA transfers from STA2x11
* @a: STA2x11 AMBA address
* @pdev: PCI device (must be hosted within the connext)
*/
static dma_addr_t a2p(dma_addr_t a, struct pci_dev *pdev)
{
struct sta2x11_mapping *map;
dma_addr_t p;
map = sta2x11_pdev_to_mapping(pdev);
p = a - map->amba_base;
return p;
}
/* At setup time, we use our own ops if the device is a ConneXt one */
static void sta2x11_setup_pdev(struct pci_dev *pdev)
{
@ -151,70 +105,12 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev)
if (!instance) /* either a sta2x11 bridge or another ST device */
return;
pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
pdev->dev.archdata.is_sta2x11 = true;
/* We must enable all devices as master, for audio DMA to work */
pci_set_master(pdev);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_setup_pdev);
/*
* The following three functions are exported (used in swiotlb: FIXME)
*/
/**
* dma_capable - Check if device can manage DMA transfers (FIXME: kill it)
* @dev: device for a PCI device
* @addr: DMA address
* @size: DMA size
*/
bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
struct sta2x11_mapping *map;
if (!dev->archdata.is_sta2x11) {
if (!dev->dma_mask)
return false;
return addr + size - 1 <= *dev->dma_mask;
}
map = sta2x11_pdev_to_mapping(to_pci_dev(dev));
if (!map || (addr < map->amba_base))
return false;
if (addr + size >= map->amba_base + STA2X11_AMBA_SIZE) {
return false;
}
return true;
}
/**
* __phys_to_dma - Return the DMA AMBA address used for this STA2x11 device
* @dev: device for a PCI device
* @paddr: Physical address
*/
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
if (!dev->archdata.is_sta2x11)
return paddr;
return p2a(paddr, to_pci_dev(dev));
}
/**
* dma_to_phys - Return the physical address used for this STA2x11 DMA address
* @dev: device for a PCI device
* @daddr: STA2x11 AMBA DMA address
*/
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
if (!dev->archdata.is_sta2x11)
return daddr;
return a2p(daddr, to_pci_dev(dev));
}
/*
* At boot we must set up the mappings for the pcie-to-amba bridge.
* It involves device access, and the same happens at suspend/resume time
@ -234,12 +130,22 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
/* At probe time, enable mapping for each endpoint, using the pdev */
static void sta2x11_map_ep(struct pci_dev *pdev)
{
struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
struct device *dev = &pdev->dev;
u32 amba_base, max_amba_addr;
int i;
if (!map)
if (!instance)
return;
pci_read_config_dword(pdev, AHB_BASE(0), &map->amba_base);
pci_read_config_dword(pdev, AHB_BASE(0), &amba_base);
max_amba_addr = amba_base + STA2X11_AMBA_SIZE - 1;
dev->dma_pfn_offset = PFN_DOWN(-amba_base);
dev->bus_dma_limit = max_amba_addr;
pci_set_consistent_dma_mask(pdev, max_amba_addr);
pci_set_dma_mask(pdev, max_amba_addr);
/* Configure AHB mapping */
pci_write_config_dword(pdev, AHB_PEXLBASE(0), 0);
@ -253,13 +159,24 @@ static void sta2x11_map_ep(struct pci_dev *pdev)
dev_info(&pdev->dev,
"sta2x11: Map EP %i: AMBA address %#8x-%#8x\n",
sta2x11_pdev_to_ep(pdev), map->amba_base,
map->amba_base + STA2X11_AMBA_SIZE - 1);
sta2x11_pdev_to_ep(pdev), amba_base, max_amba_addr);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_map_ep);
#ifdef CONFIG_PM /* Some register values must be saved and restored */
static struct sta2x11_mapping *sta2x11_pdev_to_mapping(struct pci_dev *pdev)
{
struct sta2x11_instance *instance;
int ep;
instance = sta2x11_pdev_to_instance(pdev);
if (!instance)
return NULL;
ep = sta2x11_pdev_to_ep(pdev);
return instance->map + ep;
}
static void suspend_mapping(struct pci_dev *pdev)
{
struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
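
A note on the PFN_DOWN(-amba_base) assignment above (reasoning sketched
here, assuming the generic __phys_to_dma()/__dma_to_phys() pair visible
later in this diff, and a page-aligned amba_base): the generic code
subtracts the pfn offset on the phys-to-dma side, so a negative offset adds
the AMBA base back, reproducing what the removed p2a()/a2p() helpers did.

/*
 * Sketch, not code from this diff:
 *
 *   __phys_to_dma: dma  = paddr - (dma_pfn_offset << PAGE_SHIFT)
 *   __dma_to_phys: phys = daddr + (dma_pfn_offset << PAGE_SHIFT)
 *
 * With dma_pfn_offset = PFN_DOWN(-amba_base), the shifted offset is
 * -amba_base (mod 2^64), hence:
 *
 *   dma  = paddr + amba_base    (what p2a() did)
 *   phys = daddr - amba_base    (what a2p() did)
 */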


@ -3,8 +3,10 @@ config XTENSA
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_BINFMT_FLAT if !MMU
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_DMA_PREP_COHERENT if MMU
select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
select ARCH_HAS_UNCACHED_SEGMENT if MMU
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_FRAME_POINTERS


@ -65,31 +65,4 @@ extern void platform_calibrate_ccount (void);
*/
void cpu_reset(void) __attribute__((noreturn));
/*
* Memory caching is platform-dependent in noMMU xtensa configurations.
* The following set of functions should be implemented in platform code
* in order to enable coherent DMA memory operations when CONFIG_MMU is not
* enabled. Default implementations do nothing and issue a warning.
*/
/*
* Check whether p points to a cached memory.
*/
bool platform_vaddr_cached(const void *p);
/*
* Check whether p points to an uncached memory.
*/
bool platform_vaddr_uncached(const void *p);
/*
* Return pointer to an uncached view of the cached sddress p.
*/
void *platform_vaddr_to_uncached(void *p);
/*
* Return pointer to a cached view of the uncached sddress p.
*/
void *platform_vaddr_to_cached(void *p);
#endif /* _XTENSA_PLATFORM_H */


@ -5,10 +5,11 @@
extra-y := head.o vmlinux.lds
obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
obj-y := align.o coprocessor.o entry.o irq.o platform.o process.o \
ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
vectors.o
obj-$(CONFIG_MMU) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o


@ -44,8 +44,8 @@ static void do_cache_op(phys_addr_t paddr, size_t size,
}
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@ -62,8 +62,8 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
}
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@ -81,122 +81,25 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
void arch_dma_prep_coherent(struct page *page, size_t size)
{
__invalidate_dcache_range((unsigned long)page_address(page), size);
}
/*
* Memory caching is platform-dependent in noMMU xtensa configurations.
* The following two functions should be implemented in platform code
* in order to enable coherent DMA memory operations when CONFIG_MMU is not
* enabled.
*/
#ifdef CONFIG_MMU
bool platform_vaddr_cached(const void *p)
{
unsigned long addr = (unsigned long)p;
return addr >= XCHAL_KSEG_CACHED_VADDR &&
addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
}
bool platform_vaddr_uncached(const void *p)
{
unsigned long addr = (unsigned long)p;
return addr >= XCHAL_KSEG_BYPASS_VADDR &&
addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
}
void *platform_vaddr_to_uncached(void *p)
void *uncached_kernel_address(void *p)
{
return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}
void *platform_vaddr_to_cached(void *p)
void *cached_kernel_address(void *p)
{
return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}
#else
bool __attribute__((weak)) platform_vaddr_cached(const void *p)
{
WARN_ONCE(1, "Default %s implementation is used\n", __func__);
return true;
}
bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
{
WARN_ONCE(1, "Default %s implementation is used\n", __func__);
return false;
}
void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
{
WARN_ONCE(1, "Default %s implementation is used\n", __func__);
return p;
}
void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
{
WARN_ONCE(1, "Default %s implementation is used\n", __func__);
return p;
}
#endif
/*
* Note: We assume that the full memory space is always mapped to 'kseg'
* Otherwise we have to use page attributes (not implemented).
*/
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t flag, unsigned long attrs)
{
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
/* ignore region speicifiers */
flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
flag |= GFP_DMA;
if (gfpflags_allow_blocking(flag))
page = dma_alloc_from_contiguous(dev, count, get_order(size),
flag & __GFP_NOWARN);
if (!page)
page = alloc_pages(flag | __GFP_ZERO, get_order(size));
if (!page)
return NULL;
*handle = phys_to_dma(dev, page_to_phys(page));
#ifdef CONFIG_MMU
if (PageHighMem(page)) {
void *p;
p = dma_common_contiguous_remap(page, size,
pgprot_noncached(PAGE_KERNEL),
__builtin_return_address(0));
if (!p) {
if (!dma_release_from_contiguous(dev, page, count))
__free_pages(page, get_order(size));
}
return p;
}
#endif
BUG_ON(!platform_vaddr_cached(page_address(page)));
__invalidate_dcache_range((unsigned long)page_address(page), size);
return platform_vaddr_to_uncached(page_address(page));
}
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page;
if (platform_vaddr_uncached(vaddr)) {
page = virt_to_page(platform_vaddr_to_cached(vaddr));
} else {
#ifdef CONFIG_MMU
dma_common_free_remap(vaddr, size);
#endif
page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
}
if (!dma_release_from_contiguous(dev, page, count))
__free_pages(page, get_order(size));
}
#endif /* CONFIG_MMU */


@ -1057,8 +1057,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
*/
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
u64 mask, dmaaddr = 0, size = 0, offset = 0;
int ret, msb;
u64 end, mask, dmaaddr = 0, size = 0, offset = 0;
int ret;
/*
* If @dev is expected to be DMA-capable then the bus code that created
@ -1085,19 +1085,13 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
}
if (!ret) {
msb = fls64(dmaaddr + size - 1);
/*
* Round-up to the power-of-two mask or set
* the mask to the whole 64-bit address space
* in case the DMA region covers the full
* memory window.
* Limit coherent and dma mask based on size retrieved from
* firmware.
*/
mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
/*
* Limit coherent and dma mask based on size
* retrieved from firmware.
*/
dev->bus_dma_mask = mask;
end = dmaaddr + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
dev->coherent_dma_mask = mask;
*dev->dma_mask = mask;
}


@ -910,7 +910,7 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
* value, don't extend it here. This happens on STA2X11, for example.
*
* XXX: manipulating the DMA mask from platform code is completely
* bogus, platform code should use dev->bus_dma_mask instead..
* bogus, platform code should use dev->bus_dma_limit instead..
*/
if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
return 0;


@ -405,8 +405,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
iova_len = roundup_pow_of_two(iova_len);
if (dev->bus_dma_mask)
dma_limit &= dev->bus_dma_mask;
dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
@ -659,7 +658,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
arch_sync_dma_for_cpu(dev, phys, size, dir);
arch_sync_dma_for_cpu(phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
@ -671,7 +670,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
arch_sync_dma_for_device(dev, phys, size, dir);
arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@ -685,7 +684,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
@ -699,7 +698,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@ -714,7 +713,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
dma_handle =__iommu_dma_map(dev, phys, size, prot);
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR)
arch_sync_dma_for_device(dev, phys, size, dir);
arch_sync_dma_for_device(phys, size, dir);
return dma_handle;
}


@ -93,7 +93,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
bool coherent;
unsigned long offset;
const struct iommu_ops *iommu;
u64 mask;
u64 mask, end;
ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
if (ret < 0) {
@ -148,12 +148,13 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
end = dma_addr + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
/* ...but only set bus mask if we found valid dma-ranges earlier */
/* ...but only set bus limit if we found valid dma-ranges earlier */
if (!ret)
dev->bus_dma_mask = mask;
dev->bus_dma_limit = end;
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
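
A worked example of the mask/limit computation above (hypothetical
dma-ranges values, for illustration only): the derived mask still rounds up
to a power of two, but the bus limit now records the exact end of the range.

        /* Hypothetical 1 GiB window at bus address 0x80000000. */
        u64 dma_addr = 0x80000000, size = SZ_1G;
        u64 end  = dma_addr + size - 1;          /* 0xbfffffff */
        u64 mask = DMA_BIT_MASK(ilog2(end) + 1); /* ilog2(end) == 31, */
                                                 /* so a 32-bit mask  */

        /*
         * dma_mask and coherent_dma_mask are clamped to 0xffffffff,
         * while bus_dma_limit records the exact end, 0xbfffffff, which
         * no power-of-two mask could encode.
         */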


@ -1410,10 +1410,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (hcd->self.uses_pio_for_control)
return ret;
if (hcd_uses_dma(hcd)) {
if (is_vmalloc_addr(urb->setup_packet)) {
WARN_ONCE(1, "setup packet is not dma capable\n");
return -EAGAIN;
} else if (object_is_on_stack(urb->setup_packet)) {
if (object_is_on_stack(urb->setup_packet)) {
WARN_ONCE(1, "setup packet is on stack\n");
return -EAGAIN;
}
@ -1479,9 +1476,6 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_PAGE;
} else if (is_vmalloc_addr(urb->transfer_buffer)) {
WARN_ONCE(1, "transfer buffer not dma capable\n");
ret = -EAGAIN;
} else if (object_is_on_stack(urb->transfer_buffer)) {
WARN_ONCE(1, "transfer buffer is on stack\n");
ret = -EAGAIN;


@ -375,7 +375,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
if (dma_capable(dev, dev_addr, size) &&
if (dma_capable(dev, dev_addr, size, true) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
swiotlb_force != SWIOTLB_FORCE)
@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
if (unlikely(!dma_capable(dev, dev_addr, size))) {
if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
swiotlb_tbl_unmap_single(dev, map, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
@ -405,7 +405,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
done:
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
xen_dma_sync_for_device(dev_addr, phys, size, dir);
return dev_addr;
}
@ -425,7 +425,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr))
@ -439,7 +439,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
phys_addr_t paddr = xen_bus_to_phys(dma_addr);
if (!dev_is_dma_coherent(dev))
xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
if (is_xen_swiotlb_buffer(dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@ -455,7 +455,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev))
xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
xen_dma_sync_for_device(dma_addr, paddr, size, dir);
}
/*


@ -1213,8 +1213,8 @@ struct dev_links_info {
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
* hardware supports 64-bit addresses for consistent allocations
* such descriptors.
* @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
* limit than the device itself supports.
* @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
* DMA limit than the device itself supports.
* @dma_pfn_offset: offset of DMA memory range relatively of RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
@ -1300,7 +1300,7 @@ struct device {
not all hardware supports
64 bit addresses for consistent
allocations such descriptors. */
u64 bus_dma_mask; /* upstream dma_mask constraint */
u64 bus_dma_limit; /* upstream dma constraint */
unsigned long dma_pfn_offset;
struct device_dma_parameters *dma_parms;


@ -3,6 +3,7 @@
#define _LINUX_DMA_DIRECT_H 1
#include <linux/dma-mapping.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
extern unsigned int zone_dma_bits;
@ -23,15 +24,6 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
return false;
return addr + size - 1 <=
min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
@ -59,6 +51,21 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return __sme_clr(__dma_to_phys(dev, daddr));
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
bool is_ram)
{
dma_addr_t end = addr + size - 1;
if (!dev->dma_mask)
return false;
if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
return false;
return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
@ -69,7 +76,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
gfp_t gfp, unsigned long attrs);
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
#endif /* _LINUX_DMA_DIRECT_H */
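
A caller-side sketch of the reworked helper (hypothetical driver code; only
dma_capable() and phys_to_dma() come from this header): the new is_ram flag
lets dma_direct_map_resource() skip the min_low_pfn check, which is only
meaningful for RAM-backed addresses, and on 32-bit dma_addr_t the same check
now also rejects ranges that wrapped around below the start of RAM.

#include <linux/dma-direct.h>

/* Sketch: validating a RAM address for a device. Names are hypothetical. */
static bool example_ram_addr_ok(struct device *dev, phys_addr_t paddr,
                size_t len)
{
        dma_addr_t daddr = phys_to_dma(dev, paddr);

        /* is_ram = true: apply the low-PFN/overflow sanity check too. */
        return dma_capable(dev, daddr, len, true);
}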


@ -159,7 +159,7 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
size_t size, int *ret);
@ -169,7 +169,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void *dma_alloc_from_global_coherent(ssize_t size,
static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle)
{
return NULL;
@ -580,6 +580,10 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
/* DMA must never operate on areas that might be remapped. */
if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
"rejecting DMA map of vmalloc memory\n"))
return DMA_MAPPING_ERROR;
debug_dma_map_single(dev, ptr, size);
return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
size, dir, attrs);
@ -690,7 +694,7 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
*/
static inline bool dma_addressing_limited(struct device *dev)
{
return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
dma_get_required_mask(dev);
}
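
The effect of the new vmalloc check above, seen from a caller (a hedged
sketch using only the standard dma_map_single()/dma_unmap_single() wrappers):
vmalloc memory is virtually contiguous but has no single bus address, so the
mapping now fails up front with a one-time warning instead of silently
mapping the lowmem alias of the first page.

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

/* Sketch of what the new check rejects. Names are hypothetical. */
static int example_bad_map(struct device *dev, size_t len)
{
        void *buf = vmalloc(len);       /* virtually contiguous only */
        dma_addr_t dma;

        if (!buf)
                return -ENOMEM;

        /* Now returns DMA_MAPPING_ERROR and warns. */
        dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma)) {
                vfree(buf);
                return -EINVAL;         /* use kmalloc() for DMA buffers */
        }

        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        vfree(buf);
        return 0;
}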


@ -41,8 +41,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
dma_addr_t dma_addr);
#ifdef CONFIG_MMU
/*
@ -75,29 +73,29 @@ static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir);
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(struct device *dev,
phys_addr_t paddr, size_t size, enum dma_data_direction dir)
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir);
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(struct device *dev,
phys_addr_t paddr, size_t size, enum dma_data_direction dir)
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(struct device *dev);
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(struct device *dev)
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
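
For illustration, an architecture's hooks after this prototype change might
look as follows (a minimal sketch for a hypothetical write-back D-cache;
my_cache_wback() and my_cache_inv() are placeholder names, not real kernel
helpers):

#include <linux/dma-noncoherent.h>

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        /* Make CPU writes visible before the device touches the buffer. */
        if (dir == DMA_FROM_DEVICE)
                my_cache_inv(paddr, size);      /* placeholder helper */
        else
                my_cache_wback(paddr, size);    /* placeholder helper */
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        /* Discard stale lines before the CPU reads device-written data. */
        if (dir != DMA_TO_DEVICE)
                my_cache_inv(paddr, size);      /* placeholder helper */
}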


@ -4,10 +4,10 @@
#include <linux/swiotlb.h>
void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
phys_addr_t paddr, size_t size, enum dma_data_direction dir);
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
phys_addr_t paddr, size_t size, enum dma_data_direction dir);
void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
enum dma_data_direction dir);
void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
enum dma_data_direction dir);
extern int xen_swiotlb_init(int verbose, bool early);
extern const struct dma_map_ops xen_swiotlb_dma_ops;


@ -51,9 +51,6 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
config ARCH_HAS_DMA_PREP_COHERENT
bool
config ARCH_HAS_DMA_COHERENT_TO_PFN
bool
config ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool
@ -68,9 +65,18 @@ config SWIOTLB
bool
select NEED_DMA_MAP_STATE
#
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
# in the pagetables
#
config DMA_NONCOHERENT_MMAP
bool
config DMA_REMAP
depends on MMU
select GENERIC_ALLOCATOR
select DMA_NONCOHERENT_MMAP
bool
config DMA_DIRECT_REMAP


@ -123,8 +123,9 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
return ret;
}
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
ssize_t size, dma_addr_t *dma_handle)
static void *__dma_alloc_from_coherent(struct device *dev,
struct dma_coherent_mem *mem,
ssize_t size, dma_addr_t *dma_handle)
{
int order = get_order(size);
unsigned long flags;
@ -143,7 +144,7 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
/*
* Memory was found in the coherent area.
*/
*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
ret = mem->virt_base + (pageno << PAGE_SHIFT);
spin_unlock_irqrestore(&mem->spinlock, flags);
memset(ret, 0, size);
@ -175,17 +176,18 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
if (!mem)
return 0;
*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
return 1;
}
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle)
{
if (!dma_coherent_default_memory)
return NULL;
return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
dma_handle);
return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
dma_handle);
}
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
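
The point of threading dev through this allocator is that the returned
handle must be a bus address for that particular device. A paraphrase of
what the existing dma_get_device_base() helper does (reconstructed from
memory, not a verbatim quote from this file):

static dma_addr_t sketch_device_base(struct device *dev,
                struct dma_coherent_mem *mem)
{
        /* Honour a per-device PFN offset when the region requests it. */
        if (mem->use_dev_dma_pfn_offset)
                return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
        return mem->device_base;
}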


@ -42,10 +42,11 @@ struct cma *dma_contiguous_default_area;
* Users, who want to set the size of global CMA area for their system
* should use cma= kernel parameter.
*/
static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;
static const phys_addr_t size_bytes __initconst =
(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;
static int __init early_cma(char *p)
{


@ -27,7 +27,7 @@
#include <asm/sections.h>
#define HASH_SIZE 1024ULL
#define HASH_SIZE 16384ULL
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK (HASH_SIZE - 1)
@@ -54,40 +54,40 @@ enum map_err_types {
  * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
  * @list: node on pre-allocated free_entries list
  * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
- * @type: single, page, sg, coherent
- * @pfn: page frame of the start address
- * @offset: offset of mapping relative to pfn
  * @size: length of the mapping
+ * @type: single, page, sg, coherent
  * @direction: enum dma_data_direction
  * @sg_call_ents: 'nents' from dma_map_sg
  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
+ * @pfn: page frame of the start address
+ * @offset: offset of mapping relative to pfn
  * @map_err_type: track whether dma_mapping_error() was checked
  * @stacktrace: support backtraces when a violation is detected
  */
 struct dma_debug_entry {
 	struct list_head list;
 	struct device *dev;
-	int type;
-	unsigned long pfn;
-	size_t offset;
 	u64 dev_addr;
 	u64 size;
+	int type;
 	int direction;
 	int sg_call_ents;
 	int sg_mapped_ents;
+	unsigned long pfn;
+	size_t offset;
 	enum map_err_types map_err_type;
 #ifdef CONFIG_STACKTRACE
 	unsigned int stack_len;
 	unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
 #endif
-};
+} ____cacheline_aligned_in_smp;
 
 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
 
 struct hash_bucket {
 	struct list_head list;
 	spinlock_t lock;
-} ____cacheline_aligned_in_smp;
+};
 
 /* Hash list to save the allocated dma addresses */
 static struct hash_bucket dma_entry_hash[HASH_SIZE];
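For context on the HASH_SIZE bump above: the bucket index is derived directly from the DMA address, so a bigger table means fewer entries (and less spinlock contention) per bucket. A self-contained sketch of the indexing these constants imply (the real hash_fn() in this file takes a struct dma_debug_entry):

	#define HASH_SIZE	16384ULL	/* was 1024ULL */
	#define HASH_FN_SHIFT	13
	#define HASH_FN_MASK	(HASH_SIZE - 1)

	/* bits 13..26 of the device address select one of 16384 buckets */
	static unsigned long example_hash_fn(unsigned long long dev_addr)
	{
		return (dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
	}
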
@@ -255,12 +255,10 @@ static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
 /*
  * Give up exclusive access to the hash bucket
  */
 static void put_hash_bucket(struct hash_bucket *bucket,
-			    unsigned long *flags)
+			    unsigned long flags)
 	__releases(&bucket->lock)
 {
-	unsigned long __flags = *flags;
-
-	spin_unlock_irqrestore(&bucket->lock, __flags);
+	spin_unlock_irqrestore(&bucket->lock, flags);
 }
 
 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
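The follow-on hunks below update every caller to the new by-value convention; the resulting pattern (this fragment is taken from the add_dma_entry() hunk) is:

	unsigned long flags;
	struct hash_bucket *bucket;

	bucket = get_hash_bucket(entry, &flags);	/* by pointer: flags is written */
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, flags);			/* by value: flags is only read */
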
@@ -359,7 +357,7 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
 		/*
 		 * Nothing found, go back a hash bucket
 		 */
-		put_hash_bucket(*bucket, flags);
+		put_hash_bucket(*bucket, *flags);
 		range += (1 << HASH_FN_SHIFT);
 		index.dev_addr -= (1 << HASH_FN_SHIFT);
 		*bucket = get_hash_bucket(&index, flags);
@@ -420,6 +418,7 @@ void debug_dma_dump_mappings(struct device *dev)
 		}
 		spin_unlock_irqrestore(&bucket->lock, flags);
+		cond_resched();
 	}
 }
@@ -608,7 +607,7 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 
 	bucket = get_hash_bucket(entry, &flags);
 	hash_bucket_add(bucket, entry);
-	put_hash_bucket(bucket, &flags);
+	put_hash_bucket(bucket, flags);
 
 	rc = active_cacheline_insert(entry);
 	if (rc == -ENOMEM) {
@@ -1001,7 +1000,7 @@ static void check_unmap(struct dma_debug_entry *ref)
 
 	if (!entry) {
 		/* must drop lock before calling dma_mapping_error */
-		put_hash_bucket(bucket, &flags);
+		put_hash_bucket(bucket, flags);
 
 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
 			err_printk(ref->dev, NULL,
@@ -1083,7 +1082,7 @@ static void check_unmap(struct dma_debug_entry *ref)
 
 	hash_bucket_del(entry);
 	dma_entry_free(entry);
-	put_hash_bucket(bucket, &flags);
+	put_hash_bucket(bucket, flags);
 }
 
 static void check_for_stack(struct device *dev,
@@ -1203,7 +1202,7 @@ static void check_sync(struct device *dev,
 	}
 
 out:
-	put_hash_bucket(bucket, &flags);
+	put_hash_bucket(bucket, flags);
 }
 
 static void check_sg_segment(struct device *dev, struct scatterlist *sg)
@@ -1318,7 +1317,7 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 		}
 	}
 
-	put_hash_bucket(bucket, &flags);
+	put_hash_bucket(bucket, flags);
 }
 EXPORT_SYMBOL(debug_dma_mapping_error);
@@ -1391,7 +1390,7 @@ static int get_nr_mapped_entries(struct device *dev,
 
 	if (entry)
 		mapped_ents = entry->sg_mapped_ents;
-	put_hash_bucket(bucket, &flags);
+	put_hash_bucket(bucket, flags);
 
 	return mapped_ents;
 }

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c

@@ -12,6 +12,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/pfn.h>
+#include <linux/vmalloc.h>
 #include <linux/set_memory.h>
 #include <linux/swiotlb.h>
@@ -26,10 +27,10 @@ static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
 	if (!dev->dma_mask) {
 		dev_err_once(dev, "DMA map on device without dma_mask\n");
-	} else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
+	} else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_limit) {
 		dev_err_once(dev,
-			"overflow %pad+%zu of DMA mask %llx bus mask %llx\n",
-			&dma_addr, size, *dev->dma_mask, dev->bus_dma_mask);
+			"overflow %pad+%zu of DMA mask %llx bus limit %llx\n",
+			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
 	}
 	WARN_ON_ONCE(1);
 }
@@ -42,6 +43,12 @@ static inline dma_addr_t phys_to_dma_direct(struct device *dev,
 	return phys_to_dma(dev, phys);
 }
 
+static inline struct page *dma_direct_to_page(struct device *dev,
+		dma_addr_t dma_addr)
+{
+	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
+}
+
 u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
@@ -50,15 +57,14 @@ u64 dma_direct_get_required_mask(struct device *dev)
 }
 
 static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
-		u64 *phys_mask)
+		u64 *phys_limit)
 {
-	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
-		dma_mask = dev->bus_dma_mask;
+	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
 
 	if (force_dma_unencrypted(dev))
-		*phys_mask = __dma_to_phys(dev, dma_mask);
+		*phys_limit = __dma_to_phys(dev, dma_limit);
 	else
-		*phys_mask = dma_to_phys(dev, dma_mask);
+		*phys_limit = dma_to_phys(dev, dma_limit);
 
 	/*
 	 * Optimistically try the zone that the physical address mask falls
@@ -68,9 +74,9 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
 	 * zones.
 	 */
-	if (*phys_mask <= DMA_BIT_MASK(zone_dma_bits))
+	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
 		return GFP_DMA;
-	if (*phys_mask <= DMA_BIT_MASK(32))
+	if (*phys_limit <= DMA_BIT_MASK(32))
 		return GFP_DMA32;
 	return 0;
 }
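min_not_zero(), used a few lines up to fold dev->bus_dma_limit into the mask, is what lets a zero bus_dma_limit mean "no bus limit"; its semantics (the real macro lives in include/linux/kernel.h) are roughly this sketch:

	/* pick the smaller of two values, treating 0 as "unset" */
	#define example_min_not_zero(x, y) \
		((x) == 0 ? (y) : ((y) == 0 ? (x) : ((x) < (y) ? (x) : (y))))
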
@@ -78,16 +84,16 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 	return phys_to_dma_direct(dev, phys) + size - 1 <=
-			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
+			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+		gfp_t gfp, unsigned long attrs)
 {
 	size_t alloc_size = PAGE_ALIGN(size);
 	int node = dev_to_node(dev);
 	struct page *page = NULL;
-	u64 phys_mask;
+	u64 phys_limit;
 
 	if (attrs & DMA_ATTR_NO_WARN)
 		gfp |= __GFP_NOWARN;
@@ -95,7 +101,7 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	/* we always manually zero the memory once we are done: */
 	gfp &= ~__GFP_ZERO;
 	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
-			&phys_mask);
+			&phys_limit);
 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, alloc_size);
@@ -109,7 +115,7 @@ again:
 		page = NULL;
 
 	if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-	    phys_mask < DMA_BIT_MASK(64) &&
+	    phys_limit < DMA_BIT_MASK(64) &&
 	    !(gfp & (GFP_DMA32 | GFP_DMA))) {
 		gfp |= GFP_DMA32;
 		goto again;
@@ -130,7 +136,16 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    dma_alloc_need_uncached(dev, attrs) &&
+	    !gfpflags_allow_blocking(gfp)) {
+		ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+		if (!ret)
+			return NULL;
+		goto done;
+	}
+
+	page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
 	if (!page)
 		return NULL;
@@ -139,9 +154,28 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		/* remove any dirty cache lines on the kernel alias */
 		if (!PageHighMem(page))
 			arch_dma_prep_coherent(page, size);
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 		/* return the page pointer as the opaque cookie */
-		return page;
+		ret = page;
+		goto done;
 	}
 
+	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	     dma_alloc_need_uncached(dev, attrs)) ||
+	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+		/* remove any dirty cache lines on the kernel alias */
+		arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+
+		/* create a coherent mapping */
+		ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+				dma_pgprot(dev, PAGE_KERNEL, attrs),
+				__builtin_return_address(0));
+		if (!ret) {
+			dma_free_contiguous(dev, page, size);
+			return ret;
+		}
+
+		memset(ret, 0, size);
+		goto done;
+	}
+
 	if (PageHighMem(page)) {
@@ -152,17 +186,14 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		 * so log an error and fail.
 		 */
 		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		__dma_direct_free_pages(dev, size, page);
+		dma_free_contiguous(dev, page, size);
 		return NULL;
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
+	if (force_dma_unencrypted(dev))
 		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
-		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
-	} else {
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	}
+
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
@@ -170,15 +201,14 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		arch_dma_prep_coherent(page, size);
 		ret = uncached_kernel_address(ret);
 	}
-
+done:
+	if (force_dma_unencrypted(dev))
+		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
+	else
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
 }
 
-void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
-{
-	dma_free_contiguous(dev, page, size);
-}
-
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
@@ -187,23 +217,28 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
-		__dma_direct_free_pages(dev, size, cpu_addr);
+		dma_free_contiguous(dev, cpu_addr, size);
 		return;
 	}
 
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
+		return;
+
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
-	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
-	    dma_alloc_need_uncached(dev, attrs))
-		cpu_addr = cached_kernel_address(cpu_addr);
-	__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
+		vunmap(cpu_addr);
+
+	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
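None of this changes the driver-facing API; a non-blocking coherent allocation like the fragment below is what now takes the dma_alloc_from_pool() fast path above on remap-capable non-coherent configurations:

	dma_addr_t dma_handle;
	void *buf;

	/* GFP_ATOMIC cannot block, so non-coherent devices get pool memory */
	buf = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_ATOMIC);
	if (buf)
		dma_free_coherent(dev, SZ_4K, buf, dma_handle);
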
@@ -213,6 +248,7 @@ void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 	else
@@ -230,7 +266,7 @@ void dma_direct_sync_single_for_device(struct device *dev,
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
 	if (!dev_is_dma_coherent(dev))
-		arch_sync_dma_for_device(dev, paddr, size, dir);
+		arch_sync_dma_for_device(paddr, size, dir);
 }
 EXPORT_SYMBOL(dma_direct_sync_single_for_device);
@@ -248,7 +284,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
 					dir, SYNC_FOR_DEVICE);
 
 		if (!dev_is_dma_coherent(dev))
-			arch_sync_dma_for_device(dev, paddr, sg->length,
+			arch_sync_dma_for_device(paddr, sg->length,
 					dir);
 	}
 }
@@ -264,8 +300,8 @@ void dma_direct_sync_single_for_cpu(struct device *dev,
 	phys_addr_t paddr = dma_to_phys(dev, addr);
 
 	if (!dev_is_dma_coherent(dev)) {
-		arch_sync_dma_for_cpu(dev, paddr, size, dir);
-		arch_sync_dma_for_cpu_all(dev);
+		arch_sync_dma_for_cpu(paddr, size, dir);
+		arch_sync_dma_for_cpu_all();
 	}
 
 	if (unlikely(is_swiotlb_buffer(paddr)))
@@ -283,7 +319,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
 
 		if (!dev_is_dma_coherent(dev))
-			arch_sync_dma_for_cpu(dev, paddr, sg->length, dir);
+			arch_sync_dma_for_cpu(paddr, sg->length, dir);
 
 		if (unlikely(is_swiotlb_buffer(paddr)))
 			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
@@ -291,7 +327,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 	}
 
 	if (!dev_is_dma_coherent(dev))
-		arch_sync_dma_for_cpu_all(dev);
+		arch_sync_dma_for_cpu_all();
 }
 EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
@@ -325,7 +361,7 @@ static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
 		size_t size)
 {
 	return swiotlb_force != SWIOTLB_FORCE &&
-		dma_capable(dev, dma_addr, size);
+		dma_capable(dev, dma_addr, size, true);
 }
 
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
@@ -342,7 +378,7 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 	}
 
 	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(dev, phys, size, dir);
+		arch_sync_dma_for_device(phys, size, dir);
 	return dma_addr;
 }
 EXPORT_SYMBOL(dma_direct_map_page);
@@ -374,7 +410,7 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
 {
 	dma_addr_t dma_addr = paddr;
 
-	if (unlikely(!dma_direct_possible(dev, dma_addr, size))) {
+	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
 		report_addr(dev, dma_addr, size);
 		return DMA_MAPPING_ERROR;
 	}
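For context: the new bool argument tells dma_capable() whether the target is RAM, so the min_low_pfn sanity check can be skipped for MMIO resources as above. The reworked helper lives in include/linux/dma-direct.h, which is not part of this excerpt; a rough sketch of its shape:

	static inline bool dma_capable(struct device *dev, dma_addr_t addr,
			size_t size, bool is_ram)
	{
		dma_addr_t end = addr + size - 1;

		if (!dev->dma_mask)
			return false;

		/* only RAM is expected to live above min_low_pfn */
		if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
		    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
			return false;

		return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
	}
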
@@ -383,6 +419,59 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
 }
 EXPORT_SYMBOL(dma_direct_map_resource);
 
+int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	struct page *page = dma_direct_to_page(dev, dma_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (!ret)
+		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return ret;
+}
+
+#ifdef CONFIG_MMU
+bool dma_direct_can_mmap(struct device *dev)
+{
+	return dev_is_dma_coherent(dev) ||
+		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
+}
+
+int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	unsigned long user_count = vma_pages(vma);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
+	int ret = -ENXIO;
+
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
+		return -ENXIO;
+	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+			user_count << PAGE_SHIFT, vma->vm_page_prot);
+}
+#else /* CONFIG_MMU */
+bool dma_direct_can_mmap(struct device *dev)
+{
+	return false;
+}
+
+int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	return -ENXIO;
+}
+#endif /* CONFIG_MMU */
+
 /*
  * Because 32-bit DMA masks are so common we expect every architecture to be
  * able to satisfy them - either by not supporting more physical memory, or by

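Driver-side, the new overrides are reached through the usual entry points; e.g. a .mmap file operation typically does something like the following (struct foo_dev and its fields are hypothetical here):

	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct foo_dev *fd = file->private_data;	/* hypothetical driver state */

		/* ends up in dma_direct_mmap() for devices using dma-direct */
		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr, fd->dma_handle,
				vma->vm_end - vma->vm_start);
	}
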
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c

@@ -112,24 +112,9 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
-	struct page *page;
+	struct page *page = virt_to_page(cpu_addr);
 	int ret;
 
-	if (!dev_is_dma_coherent(dev)) {
-		unsigned long pfn;
-
-		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
-			return -ENXIO;
-
-		/* If the PFN is not valid, we do not have a struct page */
-		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
-		if (!pfn_valid(pfn))
-			return -ENXIO;
-		page = pfn_to_page(pfn);
-	} else {
-		page = virt_to_page(cpu_addr);
-	}
-
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (!ret)
 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
@@ -154,7 +139,7 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (dma_is_direct(ops))
-		return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr,
+		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
 				size, attrs);
 	if (!ops->get_sgtable)
 		return -ENXIO;
@@ -192,7 +177,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
-	unsigned long pfn;
 	int ret = -ENXIO;
 
 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -203,19 +187,8 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (off >= count || user_count > count - off)
 		return -ENXIO;
 
-	if (!dev_is_dma_coherent(dev)) {
-		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
-			return -ENXIO;
-
-		/* If the PFN is not valid, we do not have a struct page */
-		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
-		if (!pfn_valid(pfn))
-			return -ENXIO;
-	} else {
-		pfn = page_to_pfn(virt_to_page(cpu_addr));
-	}
-
-	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+	return remap_pfn_range(vma, vma->vm_start,
+			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
 			user_count << PAGE_SHIFT, vma->vm_page_prot);
 #else
 	return -ENXIO;
@@ -233,12 +206,8 @@ bool dma_can_mmap(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_is_direct(ops)) {
-		return IS_ENABLED(CONFIG_MMU) &&
-			(dev_is_dma_coherent(dev) ||
-			 IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN));
-	}
-
+	if (dma_is_direct(ops))
+		return dma_direct_can_mmap(dev);
 	return ops->mmap != NULL;
 }
 EXPORT_SYMBOL_GPL(dma_can_mmap);
@@ -263,7 +232,7 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (dma_is_direct(ops))
-		return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size,
+		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
 				attrs);
 	if (!ops->mmap)
 		return -ENXIO;

diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c

@@ -210,59 +210,4 @@ bool dma_free_from_pool(void *start, size_t size)
 	gen_pool_free(atomic_pool, (unsigned long)start, size);
 	return true;
 }
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t flags, unsigned long attrs)
-{
-	struct page *page = NULL;
-	void *ret;
-
-	size = PAGE_ALIGN(size);
-
-	if (!gfpflags_allow_blocking(flags)) {
-		ret = dma_alloc_from_pool(size, &page, flags);
-		if (!ret)
-			return NULL;
-		goto done;
-	}
-
-	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
-	if (!page)
-		return NULL;
-
-	/* remove any dirty cache lines on the kernel alias */
-	arch_dma_prep_coherent(page, size);
-
-	/* create a coherent mapping */
-	ret = dma_common_contiguous_remap(page, size,
-			dma_pgprot(dev, PAGE_KERNEL, attrs),
-			__builtin_return_address(0));
-	if (!ret) {
-		__dma_direct_free_pages(dev, size, page);
-		return ret;
-	}
-
-	memset(ret, 0, size);
-done:
-	*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	return ret;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
-{
-	if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
-		phys_addr_t phys = dma_to_phys(dev, dma_handle);
-		struct page *page = pfn_to_page(__phys_to_pfn(phys));
-
-		vunmap(vaddr);
-		__dma_direct_free_pages(dev, size, page);
-	}
-}
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
-		dma_addr_t dma_addr)
-{
-	return __phys_to_pfn(dma_to_phys(dev, dma_addr));
-}
 #endif /* CONFIG_DMA_DIRECT_REMAP */

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c

@@ -678,7 +678,7 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
 
 	/* Ensure that the address returned is DMA'ble */
 	*dma_addr = __phys_to_dma(dev, *phys);
-	if (unlikely(!dma_capable(dev, *dma_addr, size))) {
+	if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
 		swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
 			attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return false;