mm: add remap_pfn_range_notrack
Patch series "add remap_pfn_range_notrack instead of reinventing it in i915", v2.

i915 has some reason to want to avoid the track_pfn_remap overhead in remap_pfn_range. Add a function to the core VM to do just that rather than reinventing the functionality poorly in the driver.

Note that the remap_io_sg path does get exercised when using Xorg on my Thinkpad X1, so this should be considered lightly tested; I've not managed to hit the remap_io_mapping path at all.

This patch (of 4): Add a version of remap_pfn_range that does not call track_pfn_remap. This will be used to fix horrible abuses of VM internals in the i915 driver.

Link: https://lkml.kernel.org/r/20210326055505.1424432-1-hch@lst.de
Link: https://lkml.kernel.org/r/20210326055505.1424432-2-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent f900110782 · commit 74ffa5a3e6
2 changed files with 34 additions and 21 deletions
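For context before the diff, here is a minimal sketch of the kind of driver-side mmap handler this API serves. It is illustrative only and not part of the patch: mydev_mmap and mydev_phys_base are hypothetical names, and the handler uses the long-standing remap_pfn_range() entry point.

/*
 * Hypothetical example, not from this patch: a minimal character-device
 * mmap handler. The mmap write lock is already held on the ->mmap() path,
 * which satisfies the "mm semaphore is held" note in the kerneldoc.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /* mydev_phys_base is an assumed device physical base address. */
        return remap_pfn_range(vma, vma->vm_start,
                               mydev_phys_base >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

A caller that has already validated the caching bits of vm_page_prot could make the new remap_pfn_range_notrack() call in the same position to skip the track_pfn_remap() overhead, which is the point of this patch.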
include/linux/mm.h
@@ -2732,6 +2732,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                        unsigned long pfn, unsigned long size, pgprot_t);
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn, unsigned long size, pgprot_t prot);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
                        struct page **pages, unsigned long *num);
mm/memory.c (53 changed lines)
@@ -2260,26 +2260,17 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
        return 0;
 }
 
-/**
- * remap_pfn_range - remap kernel memory to userspace
- * @vma: user vma to map to
- * @addr: target page aligned user address to start at
- * @pfn: page frame number of kernel physical memory address
- * @size: size of mapping area
- * @prot: page protection flags for this mapping
- *
- * Note: this is only safe if the mm semaphore is held when called.
- *
- * Return: %0 on success, negative error code otherwise.
+/*
+ * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
+ * must have pre-validated the caching bits of the pgprot_t.
  */
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
-                   unsigned long pfn, unsigned long size, pgprot_t prot)
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn, unsigned long size, pgprot_t prot)
 {
        pgd_t *pgd;
        unsigned long next;
        unsigned long end = addr + PAGE_ALIGN(size);
        struct mm_struct *mm = vma->vm_mm;
-       unsigned long remap_pfn = pfn;
        int err;
 
        if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
@@ -2309,10 +2300,6 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                vma->vm_pgoff = pfn;
        }
 
-       err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
-       if (err)
-               return -EINVAL;
-
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 
        BUG_ON(addr >= end);
@@ -2324,12 +2311,36 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                err = remap_p4d_range(mm, pgd, addr, next,
                                pfn + (addr >> PAGE_SHIFT), prot);
                if (err)
-                       break;
+                       return err;
        } while (pgd++, addr = next, addr != end);
 
-       if (err)
-               untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
-
-       return err;
+       return 0;
 }
+
+/**
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+ * @addr: target page aligned user address to start at
+ * @pfn: page frame number of kernel physical memory address
+ * @size: size of mapping area
+ * @prot: page protection flags for this mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+                   unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+       int err;
+
+       err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
+       if (err)
+               return -EINVAL;
+
+       err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
+       if (err)
+               untrack_pfn(vma, pfn, PAGE_ALIGN(size));
+       return err;
+}
 EXPORT_SYMBOL(remap_pfn_range);
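The new comment above requires callers of remap_pfn_range_notrack() to have pre-validated the caching bits of the pgprot_t. A hedged sketch of such a caller, assuming a hypothetical handler name and phys_base address not found in this patch, might look like this:

/*
 * Illustrative only: a caller that has already chosen its caching mode
 * (here uncached, via pgprot_noncached()) and therefore skips the
 * track_pfn_remap()/untrack_pfn() bookkeeping done by remap_pfn_range().
 */
static int mydev_mmap_uncached(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* phys_base is an assumed, hypothetical physical address. */
        return remap_pfn_range_notrack(vma, vma->vm_start,
                                       phys_base >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot);
}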