dma-mapping fixes for 5.10:

- document the new dma_{alloc,free}_pages API
- two fixups for the dma-mapping.h split
 -----BEGIN PGP SIGNATURE-----
 
 iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAl+UN1ELHGhjaEBsc3Qu
 ZGUACgkQD55TZVIEUYPhkg//QLMwY4Lib1IPdeYR4k/UE2E4YtPjQghucNHXDkrd
 jjV5tvGPfNDOEKeCOamivTcOmc3E+jFhQFjpHplqd4OjsfZT3EyXaopdX/qN1Bbo
 JIib0WAAxO1N2MRhEPJzGAsCkMPT32Q52/ka1QUOmr1E8VPrKhU4T9+FnTbj1rgF
 HbMk+PdV4+HP53CvK+aaOfNHqJqQoTBeCx9xebybAjxIBCI+LedRwC7haV4Zz6tg
 xSp9cW0Ztdp9U7u1dOO4gEqnL/fNk3+RWF5iwtyCi96uYmguV+/vAqpWMyej97q5
 2Dx0jTQvj0FhnPug9asydadjtUqkzfRCSDGv4TybeHT/OZJEGAwkdJG7V/5PwGOg
 VCMpqi/WRIDPnUtN3OY4IZFigbyb4wJ6MOO/hvXagC7Lc2+z9ZhuUKUjSsV90LoT
 2a4xwm9M1JAglYbhGvLl5cjzmDSdCFXuGYlJ18lRZx7d4cGi34hAqq3WfqqteHm+
 IRfeAaWN7N+W8PgzGaDqfUVDrGNVZ7eo02kVicaJFCdJE5ecS3rUbyU8uVjhX7Sl
 h8zwBs8/5hFIKLCWUBiT+UBmvWXbG/a0plRh/vIvJ8lk4m4+kwdTRwgngpSkb3G/
 ytAJPZTeI7r75zkwxTHPE01Khf8/qWJ3cdv97PpQH+7mlo4J0XUr5ssmiQ7DAHuu
 jjo=
 =0N7Y
 -----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.10-1' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - document the new dma_{alloc,free}_pages() API

 - two fixups for the dma-mapping.h split

* tag 'dma-mapping-5.10-1' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: document dma_{alloc,free}_pages
  dma-mapping: move more functions to dma-map-ops.h
  ARM/sa1111: add a missing include of dma-map-ops.h
commit 1b307ac870 by Linus Torvalds, 2020-10-24 12:17:05 -07:00
5 changed files with 68 additions and 32 deletions

@@ -519,10 +519,9 @@ routines, e.g.:::
 Part II - Non-coherent DMA allocations
 --------------------------------------
 
-These APIs allow to allocate pages in the kernel direct mapping that are
-guaranteed to be DMA addressable. This means that unlike dma_alloc_coherent,
-virt_to_page can be called on the resulting address, and the resulting
-struct page can be used for everything a struct page is suitable for.
+These APIs allow to allocate pages that are guaranteed to be DMA addressable
+by the passed in device, but which need explicit management of memory ownership
+for the kernel vs the device.
 
 If you don't understand how cache line coherency works between a processor and
 an I/O device, you should not be using this part of the API.
@@ -537,7 +536,7 @@ an I/O device, you should not be using this part of the API.
 This routine allocates a region of <size> bytes of consistent memory. It
 returns a pointer to the allocated region (in the processor's virtual address
 space) or NULL if the allocation failed. The returned memory may or may not
-be in the kernels direct mapping. Drivers must not call virt_to_page on
+be in the kernel direct mapping. Drivers must not call virt_to_page on
 the returned memory region.
 
 It also returns a <dma_handle> which may be cast to an unsigned integer the
@@ -565,7 +564,45 @@ reused.
 
 Free a region of memory previously allocated using dma_alloc_noncoherent().
 dev, size and dma_handle and dir must all be the same as those passed into
 dma_alloc_noncoherent(). cpu_addr must be the virtual address returned by
-the dma_alloc_noncoherent().
+dma_alloc_noncoherent().
+
+::
+
+	struct page *
+	dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle,
+			enum dma_data_direction dir, gfp_t gfp)
+
+This routine allocates a region of <size> bytes of non-coherent memory. It
+returns a pointer to first struct page for the region, or NULL if the
+allocation failed. The resulting struct page can be used for everything a
+struct page is suitable for.
+
+It also returns a <dma_handle> which may be cast to an unsigned integer the
+same width as the bus and given to the device as the DMA address base of
+the region.
+
+The dir parameter specified if data is read and/or written by the device,
+see dma_map_single() for details.
+
+The gfp parameter allows the caller to specify the ``GFP_`` flags (see
+kmalloc()) for the allocation, but rejects flags used to specify a memory
+zone such as GFP_DMA or GFP_HIGHMEM.
+
+Before giving the memory to the device, dma_sync_single_for_device() needs
+to be called, and before reading memory written by the device,
+dma_sync_single_for_cpu(), just like for streaming DMA mappings that are
+reused.
+
+::
+
+	void
+	dma_free_pages(struct device *dev, size_t size, struct page *page,
+			dma_addr_t dma_handle, enum dma_data_direction dir)
+
+Free a region of memory previously allocated using dma_alloc_pages().
+dev, size and dma_handle and dir must all be the same as those passed into
+dma_alloc_noncoherent(). page must be the pointer returned by
+dma_alloc_pages().
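
To make the documented calling convention concrete, here is a minimal,
hypothetical driver sketch (example_rx(), the device pointer, and the
chosen direction are illustrative and not part of the commit): allocate a
buffer with dma_alloc_pages(), pass ownership to the device and back with
the dma_sync_single_*() helpers, then release it with dma_free_pages()::

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/mm.h>
	#include <linux/printk.h>
	#include <linux/types.h>

	/* Hypothetical receive path: the buffer is written by the device. */
	static int example_rx(struct device *dev, size_t size)
	{
		dma_addr_t dma_handle;
		struct page *page;
		u8 *buf;

		page = dma_alloc_pages(dev, size, &dma_handle,
				       DMA_FROM_DEVICE, GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		/* Unlike dma_alloc_coherent(), this is a usable struct page. */
		buf = page_address(page);

		/* Hand ownership to the device before starting the transfer. */
		dma_sync_single_for_device(dev, dma_handle, size,
					   DMA_FROM_DEVICE);

		/* ... program the device and wait for completion here ... */

		/* Reclaim ownership before the CPU touches the data. */
		dma_sync_single_for_cpu(dev, dma_handle, size, DMA_FROM_DEVICE);
		pr_debug("first byte: %02x\n", buf[0]);

		dma_free_pages(dev, size, page, dma_handle, DMA_FROM_DEVICE);
		return 0;
	}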

@@ -22,7 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/clk.h>
 #include <linux/io.h>

@@ -203,6 +203,29 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
 }
 #endif /* CONFIG_DMA_DECLARE_COHERENT */
 
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
+struct page *dma_common_alloc_pages(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
+		dma_addr_t dma_handle, enum dma_data_direction dir);
+
+struct page **dma_common_find_pages(void *cpu_addr);
+void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
+		const void *caller);
+void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
+		const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
+
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+		void **cpu_addr, gfp_t flags,
+		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
+bool dma_free_from_pool(struct device *dev, void *start, size_t size);
+
 #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
 #include <asm/dma-coherence.h>
 #elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
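
These declarations moving into <linux/dma-map-ops.h> (see the matching
removal from <linux/dma-mapping.h> below) mean only code that implements
DMA operations should include this header; ordinary drivers keep using
<linux/dma-mapping.h>. A sketch of a consumer follows, with a hypothetical
ops name (the generic helpers are the real ones declared above, the
surrounding file is invented)::

	/* hypothetical backend reusing the generic helpers from dma-map-ops.h */
	#include <linux/dma-map-ops.h>

	static const struct dma_map_ops example_dma_ops = {
		.alloc_pages	= dma_common_alloc_pages,
		.free_pages	= dma_common_free_pages,
		.mmap		= dma_common_mmap,
		.get_sgtable	= dma_common_get_sgtable,
	};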

@@ -389,30 +389,6 @@ static inline void dma_sync_sgtable_for_device(struct device *dev,
 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
 
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs);
-struct page *dma_common_alloc_pages(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
-void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
-		dma_addr_t dma_handle, enum dma_data_direction dir);
-
-struct page **dma_common_find_pages(void *cpu_addr);
-void *dma_common_contiguous_remap(struct page *page, size_t size,
-		pgprot_t prot, const void *caller);
-void *dma_common_pages_remap(struct page **pages, size_t size,
-		pgprot_t prot, const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size);
-
-struct page *dma_alloc_from_pool(struct device *dev, size_t size,
-		void **cpu_addr, gfp_t flags,
-		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
-bool dma_free_from_pool(struct device *dev, void *start, size_t size);
-
-int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
-		dma_addr_t dma_addr, size_t size, unsigned long attrs);
-
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp)
 {

@@ -2,7 +2,7 @@
 /*
  * Copyright (c) 2014 The Linux Foundation
  */
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>