mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 17:08:10 +00:00
f003a1f182
When a large enough area in the iommu bitmap is found but would span a boundary, we continue the search starting from the next bit position. For large allocations this can lead to several useless invocations of bitmap_find_next_zero_area() and iommu_is_span_boundary(). Continue the search from the start of the next segment (which is the next bit position such that we'll not cross the same segment boundary again). Link: http://lkml.kernel.org/r/alpine.LFD.2.20.1606081910070.3211@schleppi Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com> Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
40 lines
1 KiB
C
40 lines
1 KiB
C
/*
|
|
* IOMMU helper functions for the free area management
|
|
*/
|
|
|
|
#include <linux/export.h>
|
|
#include <linux/bitmap.h>
|
|
#include <linux/bug.h>
|
|
|
|
/*
 * Return non-zero if an allocation of @nr bits starting at bit @index
 * (with the bitmap mapped at IOVA offset @shift) would cross a
 * @boundary_size-aligned segment boundary.  @boundary_size must be a
 * power of two so the segment offset can be computed with a mask.
 */
int iommu_is_span_boundary(unsigned int index, unsigned int nr,
			   unsigned long shift,
			   unsigned long boundary_size)
{
	unsigned long offset;

	BUG_ON(!is_power_of_2(boundary_size));

	/* Offset of the allocation start within its segment. */
	offset = (shift + index) & (boundary_size - 1);

	/* Crossing occurs when the span runs past the segment end. */
	return offset + nr > boundary_size;
}
|
|
|
|
/*
 * Allocate a contiguous range of @nr bits from @map, honouring segment
 * boundaries and an alignment mask.
 *
 * @map:           the allocation bitmap
 * @size:          number of usable bits in @map (the last bit is reserved)
 * @start:         first bit position to consider
 * @nr:            number of contiguous bits needed
 * @shift:         IOVA offset of bit 0, used for boundary checks
 * @boundary_size: segment size (power of two) that the range must not cross
 * @align_mask:    alignment mask for the returned index
 *
 * Returns the first bit of the allocated range, or -1 (as an unsigned
 * long) if no suitable free area exists.
 */
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
			       unsigned long start, unsigned int nr,
			       unsigned long shift, unsigned long boundary_size,
			       unsigned long align_mask)
{
	unsigned long index;

	/* We don't want the last of the limit */
	size -= 1;

	for (;;) {
		index = bitmap_find_next_zero_area(map, size, start, nr,
						   align_mask);
		if (index >= size)
			return -1;
		if (!iommu_is_span_boundary(index, nr, shift, boundary_size))
			break;
		/*
		 * The area would cross a segment boundary; resume the
		 * search at the start of the next segment so we never
		 * test the same boundary twice.
		 */
		start = ALIGN(shift + index, boundary_size) - shift;
	}

	bitmap_set(map, index, nr);
	return index;
}
EXPORT_SYMBOL(iommu_area_alloc);
|