staging: gasket: cleanup if dma_map_page fails in gasket_perform_mapping

Previously, pages would never have been unmapped in this case.
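
For reference, a simplified sketch of the failure path in
gasket_perform_mapping() after this change. The dma_map_page() /
dma_mapping_error() framing and the pg_tbl->device field are shown only
for illustration and are not part of this hunk; ptes[],
gasket_release_page(), and num_active_pages come from the patch below:

	ptes[i].dma_addr = dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pg_tbl->device, ptes[i].dma_addr)) {
		/*
		 * Drop the reference taken when the page was pinned and
		 * clear the half-initialized entry so the page is not
		 * leaked on the error return.
		 */
		if (gasket_release_page(ptes[i].page))
			--pg_tbl->num_active_pages;

		memset(&ptes[i], 0, sizeof(struct gasket_page_table_entry));
		return -1;
	}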

Signed-off-by: Nick Ewalt <nicholasewalt@google.com>
Signed-off-by: Todd Poynor <toddpoynor@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -433,6 +433,19 @@ static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
 	return min <= host_addr && host_addr < max;
 }
 
+/* Safely return a page to the OS. */
+static bool gasket_release_page(struct page *page)
+{
+	if (!page)
+		return false;
+
+	if (!PageReserved(page))
+		SetPageDirty(page);
+	put_page(page);
+
+	return true;
+}
+
 /*
  * Get and map last level page table buffers.
  *
@@ -500,6 +513,13 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
 				(unsigned long long)ptes[i].dma_addr,
 				(void *)page_to_pfn(page),
 				(void *)page_to_phys(page));
+
+			/* clean up */
+			if (gasket_release_page(ptes[i].page))
+				--pg_tbl->num_active_pages;
+
+			memset(&ptes[i], 0,
+			       sizeof(struct gasket_page_table_entry));
 			return -1;
 		}
 	}
@@ -571,19 +591,6 @@ static int gasket_alloc_simple_entries(struct gasket_page_table *pg_tbl,
 	return 0;
 }
 
-/* Safely return a page to the OS. */
-static bool gasket_release_page(struct page *page)
-{
-	if (!page)
-		return false;
-
-	if (!PageReserved(page))
-		SetPageDirty(page);
-	put_page(page);
-
-	return true;
-}
-
 /*
  * Unmap and release mapped pages.
  * The page table mutex must be held by the caller.