Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git, synced 2024-09-25 11:55:37 +00:00
commit f1304f9763

arm64: mm: Don't invalidate FROM_DEVICE buffers at start of DMA transfer

commit c50f11c619 upstream.
When using the streaming DMA API to map a buffer prior to inbound
non-coherent DMA (i.e. DMA_FROM_DEVICE), we invalidate any dirty CPU
cachelines so that they will not be written back during the transfer and
corrupt the buffer contents written by the DMA. This, however, poses two
potential problems:
  (1) If the DMA transfer does not write to every byte in the buffer,
      then the unwritten bytes will contain stale data once the transfer
      has completed.

  (2) If the buffer has a virtual alias in userspace, then stale data
      may be visible via this alias during the period between performing
      the cache invalidation and the DMA writes landing in memory.
Address both of these issues by cleaning (aka writing-back) the dirty
lines in arch_sync_dma_for_device(DMA_FROM_DEVICE) instead of discarding
them using invalidation.
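
In code terms, the C side now reduces to roughly the following (a
minimal sketch, not verbatim kernel source; the real caller lives in
arch/arm64/mm/dma-mapping.c and its exact shape varies by kernel
version):

    void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                                  enum dma_data_direction dir)
    {
            /*
             * __dma_map_area() now tail-calls __dma_clean_area() for
             * every direction, so DMA_FROM_DEVICE buffers are cleaned
             * (written back) rather than invalidated (discarded).
             */
            __dma_map_area(phys_to_virt(paddr), size, dir);
    }
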
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: <stable@vger.kernel.org>
Link: https://lore.kernel.org/r/20220606152150.GA31568@willie-the-truck
Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220610151228.4562-2-will@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
248 lines · 5.9 KiB · ArmAsm

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 * caches_clean_inval_pou_macro(start,end) [fixup]
 *
 * Ensure that the I and D caches are coherent within specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 * - fixup   - optional label to branch to on user fault
 */
.macro	caches_clean_inval_pou_macro, fixup
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	b	.Ldc_skip_\@
alternative_else_nop_endif
	mov	x2, x0
	mov	x3, x1
	dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
.Ldc_skip_\@:
alternative_if ARM64_HAS_CACHE_DIC
	isb
	b	.Lic_skip_\@
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, \fixup
.Lic_skip_\@:
.endm
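
/*
 * Note: \@ is the assembler's per-invocation macro counter, so the
 * .Ldc_skip_\@ and .Lic_skip_\@ labels above expand to unique local
 * labels each time the macro is used.
 */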

/*
 * caches_clean_inval_pou(start,end)
 *
 * Ensure that the I and D caches are coherent within specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
SYM_FUNC_START(caches_clean_inval_pou)
	caches_clean_inval_pou_macro
	ret
SYM_FUNC_END(caches_clean_inval_pou)
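
/*
 * The function above is the non-faulting kernel-address variant,
 * typically reached via flush_icache_range() after the kernel writes
 * or patches executable text. The *_user_pou variant below performs
 * the same maintenance on user addresses and may fault.
 */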

/*
 * caches_clean_inval_user_pou(start,end)
 *
 * Ensure that the I and D caches are coherent within specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
SYM_FUNC_START(caches_clean_inval_user_pou)
	uaccess_ttbr0_enable x2, x3, x4

	caches_clean_inval_pou_macro 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
SYM_FUNC_END(caches_clean_inval_user_pou)
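
/*
 * Control flow above: on success x0 is cleared and execution falls
 * through label 1 to disable TTBR0 user access again before returning;
 * if the fixup fires on a user fault, label 2 sets x0 = -EFAULT and
 * rejoins at label 1, so user access is always disabled on exit.
 */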

/*
 * icache_inval_pou(start,end)
 *
 * Ensure that the I cache is invalid within specified region.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
SYM_FUNC_START(icache_inval_pou)
alternative_if ARM64_HAS_CACHE_DIC
	isb
	ret
alternative_else_nop_endif

	invalidate_icache_by_line x0, x1, x2, x3
	ret
SYM_FUNC_END(icache_inval_pou)
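
/*
 * When ARM64_HAS_CACHE_DIC is set (CTR_EL0.DIC == 1), instruction
 * cache invalidation to the PoU is not required for instruction to
 * data coherence, so an isb for context synchronisation is all that
 * icache_inval_pou() needs.
 */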

/*
 * dcache_clean_inval_poc(start, end)
 *
 * Ensure that any D-cache lines for the interval [start, end)
 * are cleaned and invalidated to the PoC.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
SYM_FUNC_START_PI(dcache_clean_inval_poc)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(dcache_clean_inval_poc)
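
/*
 * PoC (Point of Coherency) is the point at which all observers, e.g.
 * CPUs and DMA-capable devices, are guaranteed to see the same copy of
 * a memory location; PoU (Point of Unification) is where a PE's
 * instruction cache, data cache and translation table walks see the
 * same copy. DMA maintenance therefore targets the PoC.
 */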

/*
 * dcache_clean_pou(start, end)
 *
 * Ensure that any D-cache lines for the interval [start, end)
 * are cleaned to the PoU.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
SYM_FUNC_START(dcache_clean_pou)
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
SYM_FUNC_END(dcache_clean_pou)
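
/*
 * Similarly, ARM64_HAS_CACHE_IDC (CTR_EL0.IDC == 1) means data cache
 * cleaning to the PoU is not required for instruction to data
 * coherence, so dcache_clean_pou() only needs a dsb to order any
 * earlier stores.
 */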

/*
 * dcache_inval_poc(start, end)
 *
 * Ensure that any D-cache lines for the interval [start, end)
 * are invalidated. Any partial lines at the ends of the interval are
 * also cleaned to PoC to prevent data loss.
 *
 * - start   - kernel start address of region
 * - end     - kernel end address of region
 */
SYM_FUNC_START_LOCAL(__dma_inv_area)
SYM_FUNC_START_PI(dcache_inval_poc)
	/* FALLTHROUGH */

/*
 * __dma_inv_area(start, end)
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
SYM_FUNC_END_PI(dcache_inval_poc)
SYM_FUNC_END(__dma_inv_area)
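
/*
 * Worked example for the edge handling in __dma_inv_area above,
 * assuming a 64-byte cache line, start = 0x1010, end = 0x1450:
 *   - end is not line-aligned, so the line at 0x1440 is cleaned and
 *     invalidated (dc civac) to preserve the bytes from 0x1450 up;
 *   - start is not line-aligned either, so the line at 0x1000 is also
 *     cleaned and invalidated, preserving the bytes below 0x1010;
 *   - every line wholly inside the buffer (0x1040 .. 0x1400) is simply
 *     invalidated (dc ivac), since its contents can be discarded.
 */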

/*
 * dcache_clean_poc(start, end)
 *
 * Ensure that any D-cache lines for the interval [start, end)
 * are cleaned to the PoC.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
SYM_FUNC_START_LOCAL(__dma_clean_area)
SYM_FUNC_START_PI(dcache_clean_poc)
	/* FALLTHROUGH */

/*
 * __dma_clean_area(start, end)
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(dcache_clean_poc)
SYM_FUNC_END(__dma_clean_area)

/*
 * dcache_clean_pop(start, end)
 *
 * Ensure that any D-cache lines for the interval [start, end)
 * are cleaned to the PoP.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
SYM_FUNC_START_PI(dcache_clean_pop)
alternative_if_not ARM64_HAS_DCPOP
	b	dcache_clean_poc
alternative_else_nop_endif
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(dcache_clean_pop)
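
/*
 * DC CVAP (clean to the Point of Persistence) is an ARMv8.2 addition
 * aimed at persistent memory; when FEAT_DCPOP is absent, cleaning to
 * the PoC is the closest architecturally-defined fallback, hence the
 * branch to dcache_clean_poc above.
 */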

/*
 * __dma_flush_area(start, size)
 *
 * clean & invalidate D / U line
 *
 * - start   - virtual start address of region
 * - size    - size in question
 */
SYM_FUNC_START_PI(__dma_flush_area)
	add	x1, x0, x1
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__dma_flush_area)

/*
 * __dma_map_area(start, size, dir)
 * - start   - kernel virtual start address
 * - size    - size of region
 * - dir     - DMA direction
 */
SYM_FUNC_START_PI(__dma_map_area)
	add	x1, x0, x1
	b	__dma_clean_area
SYM_FUNC_END_PI(__dma_map_area)
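
/*
 * Prior to the fix in the commit message above, __dma_map_area chose
 * the operation based on the DMA direction (sketch reconstructed from
 * the commit description, not verbatim pre-patch source):
 *
 *	add	x1, x0, x1
 *	cmp	w2, #DMA_FROM_DEVICE
 *	b.eq	__dma_inv_area
 *	b	__dma_clean_area
 *
 * It now cleans to the PoC unconditionally, so dirty lines are written
 * back instead of being discarded before the device writes the buffer.
 */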

/*
 * __dma_unmap_area(start, size, dir)
 * - start   - kernel virtual start address
 * - size    - size of region
 * - dir     - DMA direction
 */
SYM_FUNC_START_PI(__dma_unmap_area)
	add	x1, x0, x1
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area
	ret
SYM_FUNC_END_PI(__dma_unmap_area)
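
/*
 * On unmap, a DMA_TO_DEVICE buffer was only read by the device, so no
 * maintenance is needed and __dma_unmap_area returns immediately; for
 * DMA_FROM_DEVICE and DMA_BIDIRECTIONAL it tail-calls __dma_inv_area
 * so the CPU re-reads the freshly DMA'd data rather than stale lines.
 */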