xen-swiotlb: remove xen_io_tlb_start and xen_io_tlb_nslabs

The xen_io_tlb_start and xen_io_tlb_nslabs variables are now only used in
xen_swiotlb_init, so replace them with local variables.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
This commit is contained in:
Christoph Hellwig 2021-03-01 08:44:30 +01:00 committed by Konrad Rzeszutek Wilk
parent 4035b43da6
commit cbce99527c

View file

@@ -40,14 +40,7 @@
#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
static char *xen_io_tlb_start;
static unsigned long xen_io_tlb_nslabs;
/*
* Quick lookup value of the bus address of the IOTLB.
*/
@@ -169,75 +162,75 @@ int __ref xen_swiotlb_init(int verbose, bool early)
int rc = -ENOMEM;
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
unsigned int repeat = 3;
char *start;
unsigned long nslabs;
xen_io_tlb_nslabs = swiotlb_nr_tbl();
nslabs = swiotlb_nr_tbl();
retry:
if (!xen_io_tlb_nslabs)
xen_io_tlb_nslabs = DEFAULT_NSLABS;
bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
if (!nslabs)
nslabs = DEFAULT_NSLABS;
bytes = nslabs << IO_TLB_SHIFT;
order = get_order(bytes);
/*
* IO TLB memory already allocated. Just use it.
*/
if (io_tlb_start != 0) {
xen_io_tlb_start = phys_to_virt(io_tlb_start);
if (io_tlb_start != 0)
goto end;
}
/*
* Get IO TLB memory from any location.
*/
if (early) {
xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
start = memblock_alloc(PAGE_ALIGN(bytes),
PAGE_SIZE);
if (!xen_io_tlb_start)
if (!start)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_ALIGN(bytes), PAGE_SIZE);
} else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
if (xen_io_tlb_start)
start = (void *)xen_get_swiotlb_free_pages(order);
if (start)
break;
order--;
}
if (order != get_order(bytes)) {
pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
(PAGE_SIZE << order) >> 20);
xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
nslabs = SLABS_PER_PAGE << order;
bytes = nslabs << IO_TLB_SHIFT;
}
}
if (!xen_io_tlb_start) {
if (!start) {
m_ret = XEN_SWIOTLB_ENOMEM;
goto error;
}
/*
* And replace that memory with pages under 4GB.
*/
rc = xen_swiotlb_fixup(xen_io_tlb_start,
rc = xen_swiotlb_fixup(start,
bytes,
xen_io_tlb_nslabs);
nslabs);
if (rc) {
if (early)
memblock_free(__pa(xen_io_tlb_start),
memblock_free(__pa(start),
PAGE_ALIGN(bytes));
else {
free_pages((unsigned long)xen_io_tlb_start, order);
xen_io_tlb_start = NULL;
free_pages((unsigned long)start, order);
start = NULL;
}
m_ret = XEN_SWIOTLB_EFIXUP;
goto error;
}
if (early) {
if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
if (swiotlb_init_with_tbl(start, nslabs,
verbose))
panic("Cannot allocate SWIOTLB buffer");
rc = 0;
} else
rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
rc = swiotlb_late_init_with_tbl(start, nslabs);
end:
if (!rc)
@@ -246,17 +239,17 @@ int __ref xen_swiotlb_init(int verbose, bool early)
return rc;
error:
if (repeat--) {
xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
(xen_io_tlb_nslabs >> 1));
nslabs = max(1024UL, /* Min is 2MB */
(nslabs >> 1));
pr_info("Lowering to %luMB\n",
(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
(nslabs << IO_TLB_SHIFT) >> 20);
goto retry;
}
pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
if (early)
panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
else
free_pages((unsigned long)xen_io_tlb_start, order);
free_pages((unsigned long)start, order);
return rc;
}