diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 4c1e9a3c0ab6..f488997b0717 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -160,7 +160,7 @@ if DMA_CMA
 
 config DMA_NUMA_CMA
 	bool "Enable separate DMA Contiguous Memory Area for NUMA Node"
-	default NUMA
+	depends on NUMA
 	help
 	  Enable this option to get numa CMA areas so that NUMA devices
 	  can get local memory by DMA coherent APIs.
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 88c595e49e34..f005c66f378c 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -473,11 +473,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
 		return -EBUSY;
 	}
 
-	if (memblock_is_region_reserved(rmem->base, rmem->size)) {
-		pr_info("Reserved memory: overlap with other memblock reserved region\n");
-		return -EBUSY;
-	}
-
 	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
 	    of_get_flat_dt_prop(node, "no-map", NULL))
 		return -EINVAL;
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index f190651bcadd..06366acd27b0 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -637,15 +637,19 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
 	return entry;
 }
 
-static void __dma_entry_alloc_check_leak(void)
+/*
+ * This should be called outside of free_entries_lock scope to avoid potential
+ * deadlocks with serial consoles that use DMA.
+ */
+static void __dma_entry_alloc_check_leak(u32 nr_entries)
 {
-	u32 tmp = nr_total_entries % nr_prealloc_entries;
+	u32 tmp = nr_entries % nr_prealloc_entries;
 
 	/* Shout each time we tick over some multiple of the initial pool */
 	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
 		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
-			nr_total_entries,
-			(nr_total_entries / nr_prealloc_entries));
+			nr_entries,
+			(nr_entries / nr_prealloc_entries));
 	}
 }
 
@@ -656,8 +660,10 @@ static void __dma_entry_alloc_check_leak(void)
  */
 static struct dma_debug_entry *dma_entry_alloc(void)
 {
+	bool alloc_check_leak = false;
 	struct dma_debug_entry *entry;
 	unsigned long flags;
+	u32 nr_entries;
 
 	spin_lock_irqsave(&free_entries_lock, flags);
 	if (num_free_entries == 0) {
@@ -667,13 +673,17 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 			pr_err("debugging out of memory - disabling\n");
 			return NULL;
 		}
-		__dma_entry_alloc_check_leak();
+		alloc_check_leak = true;
+		nr_entries = nr_total_entries;
 	}
 
 	entry = __dma_entry_alloc();
 
 	spin_unlock_irqrestore(&free_entries_lock, flags);
 
+	if (alloc_check_leak)
+		__dma_entry_alloc_check_leak(nr_entries);
+
 #ifdef CONFIG_STACKTRACE
 	entry->stack_len = stack_trace_save(entry->stack_entries,
 					    ARRAY_SIZE(entry->stack_entries),
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 1acec2e22827..b481c48a31a6 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -135,9 +135,9 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 remove_mapping:
 #ifdef CONFIG_DMA_DIRECT_REMAP
 	dma_common_free_remap(addr, pool_size);
-#endif
-free_page: __maybe_unused
+free_page:
 	__free_pages(page, order);
+#endif
 out:
 	return ret;
 }
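
For reference, the debug.c hunks above apply a common locking pattern: snapshot the shared state under the lock, set a flag, and do any work that may itself take other locks (here, printing, which can reach a DMA-using serial console) only after the lock is dropped. Below is a minimal userspace sketch of that pattern, not part of the patch: pthread_mutex_t stands in for free_entries_lock, and pool_grow(), check_leak(), and entry_alloc() are hypothetical names, not kernel APIs.

/*
 * Sketch of the "snapshot under the lock, report after unlock" pattern.
 * Build with: cc -o sketch sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int num_free_entries;
static unsigned int nr_total_entries;

/* Grows the pool; returns the new total. Called with pool_lock held. */
static unsigned int pool_grow(void)
{
	num_free_entries += 256;
	nr_total_entries += 256;
	return nr_total_entries;
}

/* May block on other locks (stdio here), so never call it under pool_lock. */
static void check_leak(unsigned int nr_entries)
{
	printf("pool grown to %u entries\n", nr_entries);
}

static void entry_alloc(void)
{
	bool grew = false;
	unsigned int nr_entries = 0;

	pthread_mutex_lock(&pool_lock);
	if (num_free_entries == 0) {
		nr_entries = pool_grow();	/* snapshot under the lock */
		grew = true;
	}
	num_free_entries--;
	pthread_mutex_unlock(&pool_lock);

	if (grew)				/* report outside the lock */
		check_leak(nr_entries);
}

int main(void)
{
	entry_alloc();
	return 0;
}

Passing the snapshot (nr_entries) instead of re-reading the global after unlock mirrors the patch's new __dma_entry_alloc_check_leak(u32 nr_entries) signature: the value reported is the one observed while the lock was still held, so a concurrent grower cannot skew the message.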