From 61883d3c32418f16e35e030ca0cfd5d2de95a649 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Wed, 15 Mar 2023 14:31:32 +0300
Subject: [PATCH] iommu: fix MAX_ORDER usage in __iommu_dma_alloc_pages()

MAX_ORDER is not inclusive: the maximum allocation order the buddy
allocator can deliver is MAX_ORDER-1.

Fix MAX_ORDER usage in __iommu_dma_alloc_pages().

Also use GENMASK() instead of the hard to read "(2U << order) - 1" magic.

Link: https://lkml.kernel.org/r/20230315113133.11326-10-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov
Acked-by: Robin Murphy
Reviewed-by: Jacob Pan
Acked-by: Joerg Roedel
Signed-off-by: Andrew Morton
---
 drivers/iommu/dma-iommu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 99b2646cb5c7..ac996fd6bd9c 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -736,7 +736,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 	struct page **pages;
 	unsigned int i = 0, nid = dev_to_node(dev);
 
-	order_mask &= (2U << MAX_ORDER) - 1;
+	order_mask &= GENMASK(MAX_ORDER - 1, 0);
 	if (!order_mask)
 		return NULL;
 
@@ -756,7 +756,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 		 * than a necessity, hence using __GFP_NORETRY until
 		 * falling back to minimum-order allocations.
 		 */
-		for (order_mask &= (2U << __fls(count)) - 1;
+		for (order_mask &= GENMASK(__fls(count), 0);
 		     order_mask; order_mask &= ~order_size) {
 			unsigned int order = __fls(order_mask);
 			gfp_t alloc_flags = gfp;
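
Illustrative note, not part of the patch: a minimal userspace sketch of why the
old mask was off by one and why GENMASK(MAX_ORDER - 1, 0) is the intended
replacement. It assumes MAX_ORDER == 11 (the common default at the time; a
given kernel config may differ) and open-codes GENMASK(), since <linux/bits.h>
is not available outside the kernel.

#include <stdio.h>

#define MAX_ORDER	11	/* assumption for illustration; config-dependent */
#define GENMASK(h, l)	(((~0U) >> (31 - (h))) & ~((1U << (l)) - 1))

int main(void)
{
	/* Old mask: (2U << MAX_ORDER) - 1 sets bits 0..MAX_ORDER, so an
	 * order-MAX_ORDER allocation is wrongly permitted. */
	unsigned int old_mask = (2U << MAX_ORDER) - 1;

	/* New mask: bits 0..MAX_ORDER-1, matching the largest order the
	 * buddy allocator can actually deliver. */
	unsigned int new_mask = GENMASK(MAX_ORDER - 1, 0);

	printf("old mask %#x (highest order allowed: %u)\n", old_mask, MAX_ORDER);
	printf("new mask %#x (highest order allowed: %u)\n", new_mask, MAX_ORDER - 1);
	return 0;
}

With MAX_ORDER == 11 this prints 0xfff for the old mask versus 0x7ff for the
new one. The second hunk's change is equivalent in value, since
(2U << n) - 1 == GENMASK(n, 0); it is purely a readability cleanup.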