linux-stable/mm/io-mapping.c
Christoph Hellwig 1fbaf8fc12 mm: add a io_mapping_map_user helper
Add a helper that calls remap_pfn_range for a struct io_mapping, relying
on the pgprot pre-validation done when creating the mapping instead of
doing it at runtime.

Link: https://lkml.kernel.org/r/20210326055505.1424432-3-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2021-04-30 11:20:39 -07:00
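The pgprot pre-validation the commit message relies on happens once, when the io_mapping is set up. Below is a minimal, hypothetical sketch of that setup for a device BAR; example_setup(), example_iomap and the choice of BAR 0 are illustrative and not part of this commit, while io_mapping_init_wc() is the existing helper that establishes the write-combining pgprot the new helper later reuses.

#include <linux/pci.h>
#include <linux/io-mapping.h>

/* Hypothetical driver state: a write-combining mapping of BAR 0. */
static struct io_mapping example_iomap;

static int example_setup(struct pci_dev *pdev)
{
	/*
	 * io_mapping_init_wc() establishes the write-combining pgprot for
	 * the whole range up front; io_mapping_map_user() below relies on
	 * that instead of re-validating on every call.
	 */
	if (!io_mapping_init_wc(&example_iomap,
				pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0)))
		return -EIO;
	return 0;
}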


// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/io-mapping.h>

/**
 * io_mapping_map_user - remap an I/O mapping to userspace
 * @iomap: the source io_mapping
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, unsigned long size)
{
	vm_flags_t expected_flags = VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;

	if (WARN_ON_ONCE((vma->vm_flags & expected_flags) != expected_flags))
		return -EINVAL;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	return remap_pfn_range_notrack(vma, addr, pfn, size,
		__pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			 (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK)));
}
EXPORT_SYMBOL_GPL(io_mapping_map_user);
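
A minimal usage sketch, reusing the hypothetical example_iomap from the setup sketch above: a driver's ->mmap() handler (named example_mmap() here for illustration) sets the required VM_* flags and forwards to the helper. The mmap_lock is held across ->mmap(), which satisfies the "mm semaphore" note in the kernel-doc.

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (example_iomap.base >> PAGE_SHIFT) + vma->vm_pgoff;

	/* A real driver would also range-check vm_pgoff/size against example_iomap.size. */

	/* io_mapping_map_user() WARNs and returns -EINVAL without these flags. */
	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;

	return io_mapping_map_user(&example_iomap, vma, vma->vm_start, pfn, size);
}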