mm: Enforce VM_IOREMAP flag and range in ioremap_page_range.

There are various users of the get_vm_area() + ioremap_page_range() APIs.
Enforce that the vm_area was created with the VM_IOREMAP flag and that the
range passed to ioremap_page_range() matches the created vm_area, to avoid
accidentally ioremapping into the wrong address range.
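
For context, a minimal sketch of the pattern these checks enforce; the
helper name example_map_io, the PAGE_KERNEL prot, and the error handling
are illustrative assumptions, not part of this patch:

	/* Hypothetical caller pairing get_vm_area() with ioremap_page_range(). */
	static void __iomem *example_map_io(phys_addr_t phys, unsigned long size)
	{
		struct vm_struct *area;
		unsigned long addr;

		/*
		 * size is assumed page-aligned; get_vm_area() reserves
		 * virtual address space explicitly marked VM_IOREMAP.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;
		addr = (unsigned long)area->addr;

		/*
		 * The mapped range must cover the whole vm_area,
		 * i.e. exactly [area->addr, area->addr + size).
		 */
		if (ioremap_page_range(addr, addr + size, phys, PAGE_KERNEL)) {
			free_vm_area(area);
			return NULL;
		}
		return (void __iomem *)addr;
	}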

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/bpf/20240305030516.41519-2-alexei.starovoitov@gmail.com
---
@@ -307,8 +307,21 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
 int ioremap_page_range(unsigned long addr, unsigned long end,
 		phys_addr_t phys_addr, pgprot_t prot)
 {
+	struct vm_struct *area;
 	int err;
 
+	area = find_vm_area((void *)addr);
+	if (!area || !(area->flags & VM_IOREMAP)) {
+		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
+		return -EINVAL;
+	}
+	if (addr != (unsigned long)area->addr ||
+	    (void *)end != area->addr + get_vm_area_size(area)) {
+		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
+			  addr, end, (long)area->addr,
+			  (long)area->addr + get_vm_area_size(area));
+		return -ERANGE;
+	}
 	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
 				 ioremap_max_page_shift);
 	flush_cache_vmap(addr, end);
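
As a hedged illustration of what the new range check rejects (the caller
example_partial_map and its error handling are hypothetical):

	/* Hypothetical buggy caller: reserves two pages but maps only one. */
	static int example_partial_map(phys_addr_t phys, pgprot_t prot)
	{
		struct vm_struct *area;
		unsigned long addr;

		area = get_vm_area(2 * PAGE_SIZE, VM_IOREMAP);
		if (!area)
			return -ENOMEM;
		addr = (unsigned long)area->addr;

		/*
		 * end is addr + PAGE_SIZE, but get_vm_area_size(area) is
		 * 2 * PAGE_SIZE, so this now WARNs once and returns -ERANGE
		 * instead of silently mapping a mismatched range.
		 * (The vm_area is intentionally leaked here for brevity.)
		 */
		return ioremap_page_range(addr, addr + PAGE_SIZE, phys, prot);
	}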