mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-06 08:46:46 +00:00
Merge branch 'fixes-for-linus-for-3.6-rc2' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull three dma-mapping fixes from Marek Szyprowski.

* 'fixes-for-linus-for-3.6-rc2' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  ARM: dma-mapping: fix incorrect freeing of atomic allocations
  ARM: dma-mapping: fix atomic allocation alignment
  ARM: mm: fix MMU mapping of CMA regions
This commit is contained in:
commit
f720e7ea3a
1 changed file with 6 additions and 6 deletions
|
@ -358,7 +358,7 @@ void __init dma_contiguous_remap(void)
|
||||||
if (end > arm_lowmem_limit)
|
if (end > arm_lowmem_limit)
|
||||||
end = arm_lowmem_limit;
|
end = arm_lowmem_limit;
|
||||||
if (start >= end)
|
if (start >= end)
|
||||||
return;
|
continue;
|
||||||
|
|
||||||
map.pfn = __phys_to_pfn(start);
|
map.pfn = __phys_to_pfn(start);
|
||||||
map.virtual = __phys_to_virt(start);
|
map.virtual = __phys_to_virt(start);
|
||||||
|
@ -423,7 +423,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
|
||||||
unsigned int pageno;
|
unsigned int pageno;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
void *ptr = NULL;
|
void *ptr = NULL;
|
||||||
size_t align;
|
unsigned long align_mask;
|
||||||
|
|
||||||
if (!pool->vaddr) {
|
if (!pool->vaddr) {
|
||||||
WARN(1, "coherent pool not initialised!\n");
|
WARN(1, "coherent pool not initialised!\n");
|
||||||
|
@ -435,11 +435,11 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
|
||||||
* small, so align them to their order in pages, minimum is a page
|
* small, so align them to their order in pages, minimum is a page
|
||||||
* size. This helps reduce fragmentation of the DMA space.
|
* size. This helps reduce fragmentation of the DMA space.
|
||||||
*/
|
*/
|
||||||
align = PAGE_SIZE << get_order(size);
|
align_mask = (1 << get_order(size)) - 1;
|
||||||
|
|
||||||
spin_lock_irqsave(&pool->lock, flags);
|
spin_lock_irqsave(&pool->lock, flags);
|
||||||
pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
|
pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
|
||||||
0, count, (1 << align) - 1);
|
0, count, align_mask);
|
||||||
if (pageno < pool->nr_pages) {
|
if (pageno < pool->nr_pages) {
|
||||||
bitmap_set(pool->bitmap, pageno, count);
|
bitmap_set(pool->bitmap, pageno, count);
|
||||||
ptr = pool->vaddr + PAGE_SIZE * pageno;
|
ptr = pool->vaddr + PAGE_SIZE * pageno;
|
||||||
|
@ -648,12 +648,12 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||||
|
|
||||||
if (arch_is_coherent() || nommu()) {
|
if (arch_is_coherent() || nommu()) {
|
||||||
__dma_free_buffer(page, size);
|
__dma_free_buffer(page, size);
|
||||||
|
} else if (__free_from_pool(cpu_addr, size)) {
|
||||||
|
return;
|
||||||
} else if (!IS_ENABLED(CONFIG_CMA)) {
|
} else if (!IS_ENABLED(CONFIG_CMA)) {
|
||||||
__dma_free_remap(cpu_addr, size);
|
__dma_free_remap(cpu_addr, size);
|
||||||
__dma_free_buffer(page, size);
|
__dma_free_buffer(page, size);
|
||||||
} else {
|
} else {
|
||||||
if (__free_from_pool(cpu_addr, size))
|
|
||||||
return;
|
|
||||||
/*
|
/*
|
||||||
* Non-atomic allocations cannot be freed with IRQs disabled
|
* Non-atomic allocations cannot be freed with IRQs disabled
|
||||||
*/
|
*/
|
||||||
|
|
Loading…
Reference in a new issue