dma-mapping: force bouncing if the kmalloc() size is not cache-line-aligned

For direct DMA, if the size is small enough to have originated from a
kmalloc() cache below ARCH_DMA_MINALIGN, check its alignment against
dma_get_cache_alignment() and bounce if necessary.  For larger sizes, it
is the responsibility of the DMA API caller to ensure proper alignment.

At this point, the kmalloc() caches are properly aligned but this will
change in a subsequent patch.

Architectures can opt in by selecting DMA_BOUNCE_UNALIGNED_KMALLOC.
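
As an illustration of the case being targeted (a hypothetical driver
sketch, not part of this patch; my_dev, my_map_rx and RX_LEN are made-up
names), consider a small kmalloc() buffer mapped for DMA_FROM_DEVICE on a
non-coherent device.  With this patch the mapping below is transparently
bounced through SWIOTLB instead of risking corruption of neighbouring
slab objects on cache line invalidation:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define RX_LEN  32                      /* assumed: served from kmalloc-32 */

static int my_map_rx(struct device *my_dev)
{
        void *buf = kmalloc(RX_LEN, GFP_KERNEL);
        dma_addr_t addr;

        if (!buf)
                return -ENOMEM;

        /*
         * kmalloc_size_roundup(32) == 32 is not a multiple of
         * dma_get_cache_alignment() (128 on arm64) and the direction is
         * destructive, so dma_kmalloc_needs_bounce() returns true and
         * the buffer is bounced.
         */
        addr = dma_map_single(my_dev, buf, RX_LEN, DMA_FROM_DEVICE);
        if (dma_mapping_error(my_dev, addr)) {
                kfree(buf);
                return -ENOMEM;
        }

        /* ... run the DMA; dma_unmap_single() copies the bounce buffer
         * back into buf before the CPU reads it ... */
        dma_unmap_single(my_dev, addr, RX_LEN, DMA_FROM_DEVICE);
        kfree(buf);
        return 0;
}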

Link: https://lkml.kernel.org/r/20230612153201.554742-15-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Isaac J. Manjarres <isaacmanjarres@google.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jerry Snitselaar <jsnitsel@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jonathan Cameron <jic23@kernel.org>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Lars-Peter Clausen <lars@metafoo.de>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Mike Snitzer <snitzer@kernel.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Saravana Kannan <saravanak@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 370645f41e (parent cb147bbe22)
Author: Catalin Marinas, 2023-06-12 16:31:58 +01:00; committed by Andrew Morton
3 files changed, 67 insertions(+), 1 deletion(-)

diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h

@@ -8,6 +8,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/pgtable.h>
+#include <linux/slab.h>
 
 struct cma;
@@ -277,6 +278,66 @@ static inline bool dev_is_dma_coherent(struct device *dev)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
 
+/*
+ * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_safe(struct device *dev,
+                                    enum dma_data_direction dir)
+{
+        /*
+         * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
+         * caches have already been aligned to a DMA-safe size.
+         */
+        if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+                return true;
+
+        /*
+         * kmalloc() buffers are DMA-safe irrespective of size if the device
+         * is coherent or the direction is DMA_TO_DEVICE (non-destructive
+         * cache maintenance and benign cache line evictions).
+         */
+        if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
+                return true;
+
+        return false;
+}
+
+/*
+ * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
+ * sufficiently aligned for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_size_aligned(size_t size)
+{
+        /*
+         * Larger kmalloc() sizes are guaranteed to be aligned to
+         * ARCH_DMA_MINALIGN.
+         */
+        if (size >= 2 * ARCH_DMA_MINALIGN ||
+            IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
+                return true;
+
+        return false;
+}
+
+/*
+ * Check whether the given object size may have originated from a kmalloc()
+ * buffer with a slab alignment below the DMA-safe alignment and needs
+ * bouncing for non-coherent DMA. The pointer alignment is not considered and
+ * in-structure DMA-safe offsets are the responsibility of the caller. Such
+ * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
+ *
+ * The heuristics can have false positives, bouncing unnecessarily, though the
+ * buffers would be small. False negatives are theoretically possible if, for
+ * example, multiple small kmalloc() buffers are coalesced into a larger
+ * buffer that passes the alignment check. There are no such known constructs
+ * in the kernel.
+ */
+static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
+                                            enum dma_data_direction dir)
+{
+        return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
+}
+
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                 gfp_t gfp, unsigned long attrs);
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
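
To make the size heuristic above concrete, here is a minimal standalone
sketch of the same arithmetic (an approximation, not kernel code:
kmalloc_size_roundup() is modelled with the power-of-two plus 96/192 size
classes, and ARCH_DMA_MINALIGN and dma_get_cache_alignment() are both
assumed to be 128, as on arm64):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ARCH_DMA_MINALIGN  128          /* assumed arm64 value */
#define CACHE_ALIGN        128          /* assumed dma_get_cache_alignment() */

/* Rough model of kmalloc_size_roundup(): next kmalloc cache size. */
static size_t kmalloc_roundup(size_t size)
{
        size_t n;

        if (size > 64 && size <= 96)
                return 96;
        if (size > 128 && size <= 192)
                return 192;
        for (n = 8; n < size; n <<= 1)
                ;
        return n;
}

static bool size_aligned(size_t size)
{
        return size >= 2 * ARCH_DMA_MINALIGN ||
               kmalloc_roundup(size) % CACHE_ALIGN == 0;
}

int main(void)
{
        static const size_t sizes[] = { 8, 96, 128, 190, 256, 300 };
        size_t i;

        /* 8, 96 and 190 bounce (they round up to 8, 96, 192); the rest,
         * being either a multiple of 128 or >= 256, do not. */
        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("size %3zu -> %s\n", sizes[i],
                       size_aligned(sizes[i]) ? "no bounce" : "bounce");
        return 0;
}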

diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig

@@ -90,6 +90,10 @@ config SWIOTLB
        bool
        select NEED_DMA_MAP_STATE
 
+config DMA_BOUNCE_UNALIGNED_KMALLOC
+       bool
+       depends on SWIOTLB
+
 config DMA_RESTRICTED_POOL
        bool "DMA Restricted Pool"
        depends on OF && OF_RESERVED_MEM && SWIOTLB
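
Usage note: an architecture opts in by selecting the new symbol from its
own Kconfig entry, i.e. a line such as "select DMA_BOUNCE_UNALIGNED_KMALLOC"
(arm64 gains this later in the series).  The symbol is deliberately not
user-visible, and the SWIOTLB dependency reflects that the bouncing is
implemented on top of the SWIOTLB machinery.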

diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h

@@ -94,7 +94,8 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
                return swiotlb_map(dev, phys, size, dir, attrs);
        }
 
-       if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+       if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
+           dma_kmalloc_needs_bounce(dev, size, dir)) {
                if (is_pci_p2pdma_page(page))
                        return DMA_MAPPING_ERROR;
                if (is_swiotlb_active(dev))
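
For context, the full conditional in dma_direct_map_page() reads
approximately as follows after this patch (reconstructed from
kernel/dma/direct.h rather than quoted from the hunk above).  It also shows
why DMA_BOUNCE_UNALIGNED_KMALLOC depends on SWIOTLB: P2PDMA pages cannot be
bounced, and if SWIOTLB is not active the mapping fails instead of risking
corruption:

        if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
            dma_kmalloc_needs_bounce(dev, size, dir)) {
                if (is_pci_p2pdma_page(page))
                        return DMA_MAPPING_ERROR;       /* cannot bounce p2p memory */
                if (is_swiotlb_active(dev))
                        return swiotlb_map(dev, phys, size, dir, attrs);

                dev_WARN_ONCE(dev, 1,
                             "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                             &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                return DMA_MAPPING_ERROR;
        }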