xen-swiotlb: split xen_swiotlb_init

Split xen_swiotlb_init into a normal and an early case.  That makes both
much simpler and more readable, and also allows marking the early
code as __init and x86-only.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
This commit is contained in:
Christoph Hellwig 2021-03-01 08:44:32 +01:00 committed by Konrad Rzeszutek Wilk
parent 5d0538b2b8
commit a98f565462
4 changed files with 75 additions and 58 deletions

View File

@ -140,7 +140,7 @@ static int __init xen_mm_init(void)
struct gnttab_cache_flush cflush;
if (!xen_initial_domain())
return 0;
xen_swiotlb_init(1, false);
xen_swiotlb_init();
cflush.op = 0;
cflush.a.dev_bus_addr = 0;

View File

@ -59,7 +59,7 @@ int __init pci_xen_swiotlb_detect(void)
void __init pci_xen_swiotlb_init(void)
{
if (xen_swiotlb) {
xen_swiotlb_init(1, true /* early */);
xen_swiotlb_init_early();
dma_ops = &xen_swiotlb_dma_ops;
#ifdef CONFIG_PCI
@ -76,7 +76,7 @@ int pci_xen_swiotlb_init_late(void)
if (xen_swiotlb)
return 0;
rc = xen_swiotlb_init(1, false /* late */);
rc = xen_swiotlb_init();
if (rc)
return rc;

View File

@ -156,96 +156,112 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
/*
 * Late (post-boot) setup of the Xen software IO TLB.
 *
 * Allocates the bounce buffer with the page allocator, exchanges the
 * backing pages for machine pages below 4GB via xen_swiotlb_fixup(),
 * and registers the buffer with the core swiotlb code.  On failure the
 * requested size is halved (down to a 2MB floor) and the whole sequence
 * is retried up to three times.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int __ref xen_swiotlb_init(void)
{
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned long nslabs, bytes, order;
	unsigned int repeat = 3;
	int rc = -ENOMEM;
	char *start = NULL;	/* NULL so the error path never frees garbage */

	nslabs = swiotlb_nr_tbl();
	if (!nslabs)
		nslabs = DEFAULT_NSLABS;
retry:
	m_ret = XEN_SWIOTLB_ENOMEM;
	bytes = nslabs << IO_TLB_SHIFT;
	order = get_order(bytes);

	/*
	 * Get IO TLB memory from any location.  Shrink the order until
	 * the page allocator can satisfy it, but never below 2MB worth
	 * of slabs.
	 */
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		start = (void *)xen_get_swiotlb_free_pages(order);
		if (start)
			break;
		order--;
	}
	if (!start)
		goto error;
	if (order != get_order(bytes)) {
		/* Got less than asked for: adjust bookkeeping to match. */
		pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
		bytes = nslabs << IO_TLB_SHIFT;
	}

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(start, bytes, nslabs);
	if (rc) {
		free_pages((unsigned long)start, order);
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	rc = swiotlb_late_init_with_tbl(start, nslabs);
	if (rc)
		return rc;
	swiotlb_set_max_segment(PAGE_SIZE);
	return 0;
error:
	if (repeat--) {
		/* Min is 2MB */
		nslabs = max(1024UL, (nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	free_pages((unsigned long)start, order);
	return rc;
}
#ifdef CONFIG_X86
/*
 * Early (boot-time) setup of the Xen software IO TLB, x86 only.
 *
 * Like xen_swiotlb_init() but allocates from memblock instead of the
 * page allocator and panics on any unrecoverable failure, since at this
 * point the kernel cannot run without a working DMA bounce buffer.
 * On xen_swiotlb_fixup() failure the size is halved (2MB floor) and the
 * allocation retried up to three times before giving up.
 */
void __init xen_swiotlb_init_early(void)
{
	unsigned long nslabs, bytes;
	unsigned int repeat = 3;
	char *start;
	int rc;

	nslabs = swiotlb_nr_tbl();
	if (!nslabs)
		nslabs = DEFAULT_NSLABS;
retry:
	/*
	 * Get IO TLB memory from any location.
	 */
	bytes = nslabs << IO_TLB_SHIFT;
	start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (!start)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(start, bytes, nslabs);
	if (rc) {
		/* Return the block to memblock before shrinking and retrying. */
		memblock_free(__pa(start), PAGE_ALIGN(bytes));
		if (repeat--) {
			/* Min is 2MB */
			nslabs = max(1024UL, (nslabs >> 1));
			pr_info("Lowering to %luMB\n",
				(nslabs << IO_TLB_SHIFT) >> 20);
			goto retry;
		}
		panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
	}
	/* false: stay quiet; callers print their own banner if desired. */
	if (swiotlb_init_with_tbl(start, nslabs, false))
		panic("Cannot allocate SWIOTLB buffer");
	swiotlb_set_max_segment(PAGE_SIZE);
}
#endif /* CONFIG_X86 */
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,

View File

@ -9,7 +9,8 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir);
/* Late (post-boot) init: returns 0 or a negative errno. */
int xen_swiotlb_init(void);
/* Boot-time init, x86 only: panics on unrecoverable failure. */
void __init xen_swiotlb_init_early(void);
extern const struct dma_map_ops xen_swiotlb_dma_ops;
#endif /* __LINUX_SWIOTLB_XEN_H */