dma-mapping fixes for Linux 6.7

- don't leave pages decrypted for DMA in encrypted memory setups
  lingering around on failure (Petr Tesarik)
- fix an out of bounds access in the new dynamic swiotlb code (Petr Tesarik)
- fix dma_addressing_limited for systems with weird physical memory layouts
  (Jia He)
 -----BEGIN PGP SIGNATURE-----
 
 iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmVN1HQLHGhjaEBsc3Qu
 ZGUACgkQD55TZVIEUYMXsg//YYUP27ZfjqOeyRAv5IZ56u5Gci8d32vHEZjvEngI
 5wAErzIoGzHXtZIk5nCU9Lrc4+g608gXqqefkU7e0lAMHVSpExHF0ZxktRBG0/bz
 OQNLrlT9HpnOJgAKLg4a2rSpomfbtMBd1MNek1ZI8Osz49AagqANOOlfpr13lvw6
 kWzZEnoRKJqUW3x8g5u/WnggZzoBYHeMJp9EORutnhxU09DlpJ6pVg5wP7ysKQfT
 FUoX4YUoe52pYgluTwNlJkh/Mxe3/oZOPbCIMB0eclVxylLDVEZcqlh9A91BTaQK
 rOQv51UGl2eS1DvIDUqgoy3VlB0PQ9FADdGVP0BQfnn9yS1vfo4A7hQS99jLejC1
 SnAsASeWVj5Ot/peWMUh5UDoHhJWtlEY6Lfv5Qr1a8Gan21+3CrBLhd67eUvun40
 koafsbUzWgmY9qadNNjjebY761WXa2TgLb0LzYo42Asur8Qw1FC8/OHV6QMET/t7
 jB+NqQWydIAr6dEzVbqm5ZQ2/r3hXuzJcOKjKhgjhuTzHAGXkeiAkkkuGhPQr5Nq
 vqua2m55xwCK8Zucie/tnj4ujRY1hnUgxcs0sm0koDVNcpYm3h1MmoTqzaISJVPh
 4edyTESz95MlgiMzion8+Gq/dGVeYzyO0XKWnyMVQ7pCJnJfoWa5Pqhgsg+dXiU8
 Txo=
 =6pQM
 -----END PGP SIGNATURE-----

Merge tag 'dma-mapping-6.7-2023-11-10' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - don't leave pages decrypted for DMA in encrypted memory setups
   lingering around on failure (Petr Tesarik)

 - fix an out of bounds access in the new dynamic swiotlb code (Petr
   Tesarik)

 - fix dma_addressing_limited for systems with weird physical memory
   layouts (Jia He)

* tag 'dma-mapping-6.7-2023-11-10' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: fix out-of-bounds TLB allocations with CONFIG_SWIOTLB_DYNAMIC
  dma-mapping: fix dma_addressing_limited() if dma_range_map can't cover all system RAM
  dma-mapping: move dma_addressing_limited() out of line
  swiotlb: do not free decrypted pages if dynamic
commit 391ce5b9c4
Linus Torvalds, 2023-11-10 11:09:07 -08:00
5 changed files with 86 additions and 24 deletions


@@ -144,6 +144,7 @@ bool dma_pci_p2pdma_supported(struct device *dev);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
+bool dma_addressing_limited(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
 size_t dma_opt_mapping_size(struct device *dev);
 bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
@@ -264,6 +265,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
 {
 	return 0;
 }
+static inline bool dma_addressing_limited(struct device *dev)
+{
+	return false;
+}
 static inline size_t dma_max_mapping_size(struct device *dev)
 {
 	return 0;
@@ -465,20 +470,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
 	return dma_set_mask_and_coherent(dev, mask);
 }
 
-/**
- * dma_addressing_limited - return if the device is addressing limited
- * @dev: device to check
- *
- * Return %true if the devices DMA mask is too small to address all memory in
- * the system, else %false. Lack of addressing bits is the prime reason for
- * bounce buffering, but might not be the only one.
- */
-static inline bool dma_addressing_limited(struct device *dev)
-{
-	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
-			dma_get_required_mask(dev);
-}
-
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
 {
 	if (dev->dma_parms && dev->dma_parms->max_segment_size)
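
To make the check concrete (hypothetical numbers, not part of this commit): on a system whose highest RAM address needs 36 bits, dma_get_required_mask() reports DMA_BIT_MASK(36); a device with a 32-bit DMA mask and no bus_dma_limit has an effective limit of min_not_zero(DMA_BIT_MASK(32), 0) == DMA_BIT_MASK(32), which is smaller, so dma_addressing_limited() returns true and bounce buffering is likely needed.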


@@ -587,6 +587,46 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	return mask >= phys_to_dma_unencrypted(dev, min_mask);
 }
 
+/*
+ * To check whether all ram resource ranges are covered by dma range map
+ * Returns 0 when further check is needed
+ * Returns 1 if there is some RAM range can't be covered by dma_range_map
+ */
+static int check_ram_in_range_map(unsigned long start_pfn,
+				  unsigned long nr_pages, void *data)
+{
+	unsigned long end_pfn = start_pfn + nr_pages;
+	const struct bus_dma_region *bdr = NULL;
+	const struct bus_dma_region *m;
+	struct device *dev = data;
+
+	while (start_pfn < end_pfn) {
+		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
+			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
+
+			if (start_pfn >= cpu_start_pfn &&
+			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
+				bdr = m;
+				break;
+			}
+		}
+		if (!bdr)
+			return 1;
+
+		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
+	}
+
+	return 0;
+}
+
+bool dma_direct_all_ram_mapped(struct device *dev)
+{
+	if (!dev->dma_range_map)
+		return true;
+	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
+				      check_ram_in_range_map);
+}
+
 size_t dma_direct_max_mapping_size(struct device *dev)
 {
 	/* If SWIOTLB is active, use its maximum mapping size */
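
As a standalone illustration of the coverage walk added above, here is a minimal user-space sketch; struct toy_region, range_covered() and the PFN_DOWN macro are simplified stand-ins for struct bus_dma_region and the kernel helpers, not kernel code:

#include <stdbool.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT 12
#define PFN_DOWN(x) ((unsigned long)((x) >> TOY_PAGE_SHIFT))

/* Simplified stand-in for struct bus_dma_region: a zero-sized entry ends the map. */
struct toy_region {
	unsigned long long cpu_start;
	unsigned long long size;
};

/* Return true if every PFN in [start_pfn, start_pfn + nr_pages) lies in some region. */
static bool range_covered(const struct toy_region *map,
			  unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	while (start_pfn < end_pfn) {
		const struct toy_region *hit = NULL;
		const struct toy_region *m;

		for (m = map; PFN_DOWN(m->size); m++) {
			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

			if (start_pfn >= cpu_start_pfn &&
			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
				hit = m;
				break;
			}
		}
		if (!hit)
			return false;
		/* Jump past the matching region and keep checking the rest. */
		start_pfn = PFN_DOWN(hit->cpu_start) + PFN_DOWN(hit->size);
	}
	return true;
}

int main(void)
{
	/* One mapped region covering CPU addresses [0x80000000, 0x80000000 + 1 GiB). */
	const struct toy_region map[] = {
		{ .cpu_start = 0x80000000ULL, .size = 0x40000000ULL },
		{ 0 },	/* terminator, like the zero-sized end of dma_range_map */
	};

	printf("%d\n", range_covered(map, PFN_DOWN(0x80000000ULL), 256));	/* 1: covered */
	printf("%d\n", range_covered(map, PFN_DOWN(0xc0000000ULL), 256));	/* 0: above the map */
	return 0;
}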


@@ -20,6 +20,7 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		enum dma_data_direction dir, unsigned long attrs);
+bool dma_direct_all_ram_mapped(struct device *dev);
 size_t dma_direct_max_mapping_size(struct device *dev);
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \


@@ -793,6 +793,28 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_set_coherent_mask);
 
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the devices DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+bool dma_addressing_limited(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+			 dma_get_required_mask(dev))
+		return true;
+
+	if (unlikely(ops))
+		return false;
+	return !dma_direct_all_ram_mapped(dev);
+}
+EXPORT_SYMBOL_GPL(dma_addressing_limited);
+
 size_t dma_max_mapping_size(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
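
For callers, the out-of-line helper is typically consulted right after setting the DMA mask. A minimal hypothetical driver sketch (foo_probe and the fallback policy are invented for illustration, not taken from this commit):

#include <linux/dma-mapping.h>

static int foo_probe(struct device *dev)
{
	int ret;

	/* Ask for the widest mask the hardware supports. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/*
	 * Even with a 64-bit mask, bus_dma_limit or a dma_range_map that
	 * does not cover all of system RAM can force bounce buffering;
	 * scale back ring sizes (or similar) in that case.
	 */
	if (dma_addressing_limited(dev))
		dev_info(dev, "DMA addressing limited, using smaller rings\n");

	return 0;
}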


@@ -283,7 +283,8 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
 	}
 
 	for (i = 0; i < mem->nslabs; i++) {
-		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
+		mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
+					 mem->nslabs - i);
 		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
 		mem->slots[i].alloc_size = 0;
 	}
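
To see why the clamp matters (a hypothetical example; IO_TLB_SEGSIZE is 128 in current kernels): with CONFIG_SWIOTLB_DYNAMIC a transient pool can hold fewer than IO_TLB_SEGSIZE slots, say mem->nslabs = 64. The old initialization set slots[0].list to 128 - io_tlb_offset(0) = 128, advertising a run of 128 free slots where only 64 exist, so a later allocation could be placed past the end of the pool; min(IO_TLB_SEGSIZE - io_tlb_offset(i), mem->nslabs - i) caps the advertised run at the slots that actually exist.
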
@@ -558,29 +559,40 @@ void __init swiotlb_exit(void)
  * alloc_dma_pages() - allocate pages to be used for DMA
  * @gfp:	GFP flags for the allocation.
  * @bytes:	Size of the buffer.
+ * @phys_limit:	Maximum allowed physical address of the buffer.
  *
  * Allocate pages from the buddy allocator. If successful, make the allocated
  * pages decrypted that they can be used for DMA.
  *
- * Return: Decrypted pages, or %NULL on failure.
+ * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
+ * if the allocated physical address was above @phys_limit.
  */
-static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes)
+static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
 {
 	unsigned int order = get_order(bytes);
 	struct page *page;
+	phys_addr_t paddr;
 	void *vaddr;
 
 	page = alloc_pages(gfp, order);
 	if (!page)
 		return NULL;
 
-	vaddr = page_address(page);
+	paddr = page_to_phys(page);
+	if (paddr + bytes - 1 > phys_limit) {
+		__free_pages(page, order);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	vaddr = phys_to_virt(paddr);
 	if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
 		goto error;
 	return page;
 
 error:
-	__free_pages(page, order);
+	/* Intentional leak if pages cannot be encrypted again. */
+	if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
+		__free_pages(page, order);
 	return NULL;
 }
 
@@ -618,11 +630,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
 	else if (phys_limit <= DMA_BIT_MASK(32))
 		gfp |= __GFP_DMA32;
 
-	while ((page = alloc_dma_pages(gfp, bytes)) &&
-	       page_to_phys(page) + bytes - 1 > phys_limit) {
-		/* allocated, but too high */
-		__free_pages(page, get_order(bytes));
-
+	while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
 		    phys_limit < DMA_BIT_MASK(64) &&
 		    !(gfp & (__GFP_DMA32 | __GFP_DMA)))