mm/vmalloc: remove map_kernel_range

Patch series "mm/vmalloc: cleanup after hugepage series", v2.

Christoph pointed out some overdue cleanups required after the huge
vmalloc series, and I had another improvement to a failure error message
as well.

This patch (of 5):

map_kernel_range() is a shim around vmap_pages_range(); get rid of it.

Move the main API comment from the _noflush variant to the normal variant,
and make _noflush internal to mm/.
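
The conversion for callers is mechanical, as the hunks below show. As a
minimal sketch (addr, size, prot, pages and err are placeholders for
whatever state the caller already has; vmap_pages_range() remains static
to mm/vmalloc.c):

	/* before: size-based shim, which also flushed caches itself */
	err = map_kernel_range(addr, size, prot, pages);

	/* after: end-based, small pages mapped with an explicit PAGE_SHIFT */
	err = vmap_pages_range(addr, addr + size, prot, pages, PAGE_SHIFT);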

Link: https://lkml.kernel.org/r/20210322021806.892164-1-npiggin@gmail.com
Link: https://lkml.kernel.org/r/20210322021806.892164-2-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
commit b67177ecd9 (parent 121e6f3258)
5 changed files with 39 additions and 52 deletions

diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst
--- a/Documentation/core-api/cachetlb.rst
+++ b/Documentation/core-api/cachetlb.rst
@@ -213,7 +213,7 @@ Here are the routines, one by one:
 	there will be no entries in the cache for the kernel address
 	space for virtual addresses in the range 'start' to 'end-1'.
 
-	The first of these two routines is invoked after map_kernel_range()
+	The first of these two routines is invoked after vmap_range()
 	has installed the page table entries. The second is invoked
 	before unmap_kernel_range() deletes the page table entries.

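The ordering the documentation describes is the pattern vmap_pages_range()
itself follows after this patch; a minimal sketch of the mapping side
(names as in mm/vmalloc.c after this change):

	/* install the page table entries first ... */
	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	/* ... then flush the cache over the newly mapped kernel range */
	flush_cache_vmap(addr, end);
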
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -212,10 +212,6 @@ static inline bool is_vm_area_hugepages(const void *addr)
 int vmap_range(unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
 			unsigned int max_page_shift);
-extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
-				    pgprot_t prot, struct page **pages);
-int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
-		struct page **pages);
 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
@@ -227,13 +223,6 @@ static inline void set_vm_flush_reset_perms(void *addr)
 		vm->flags |= VM_FLUSH_RESET_PERMS;
 }
 #else
-static inline int
-map_kernel_range_noflush(unsigned long start, unsigned long size,
-			pgprot_t prot, struct page **pages)
-{
-	return size >> PAGE_SHIFT;
-}
-#define map_kernel_range map_kernel_range_noflush
 static inline void
 unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {

diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -637,4 +637,10 @@ struct migration_target_control {
 	gfp_t gfp_mask;
 };
 
+/*
+ * mm/vmalloc.c
+ */
+int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+		pgprot_t prot, struct page **pages, unsigned int page_shift);
+
 #endif	/* __MM_INTERNAL_H */

diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -8,6 +8,7 @@
  * Chunks are mapped into vmalloc areas and populated page by page.
  * This is the default chunk allocator.
  */
+#include "internal.h"
 
 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
 				    unsigned int cpu, int page_idx)
@@ -192,8 +193,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
 static int __pcpu_map_pages(unsigned long addr, struct page **pages,
 			    int nr_pages)
 {
-	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
-					PAGE_KERNEL, pages);
+	return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT),
+					PAGE_KERNEL, pages, PAGE_SHIFT);
 }
 
 /**
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -523,7 +523,16 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
 	return 0;
 }
 
-static int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+/*
+ * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
+ * flush caches.
+ *
+ * The caller is responsible for calling flush_cache_vmap() after this
+ * function returns successfully and before the addresses are accessed.
+ *
+ * This is an internal function only. Do not use outside mm/.
+ */
+int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
 	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
@@ -549,6 +558,18 @@ static int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 	return 0;
 }
 
+/**
+ * vmap_pages_range - map pages to a kernel virtual address
+ * @addr: start of the VM area to map
+ * @end: end of the VM area to map (non-inclusive)
+ * @prot: page protection flags to use
+ * @pages: pages to map (always PAGE_SIZE pages)
+ * @page_shift: maximum shift that the pages may be mapped with, @pages must
+ * be aligned and contiguous up to at least this shift.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
 static int vmap_pages_range(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
@@ -559,40 +580,6 @@ static int vmap_pages_range(unsigned long addr, unsigned long end,
 	return err;
 }
 
-/**
- * map_kernel_range_noflush - map kernel VM area with the specified pages
- * @addr: start of the VM area to map
- * @size: size of the VM area to map
- * @prot: page protection flags to use
- * @pages: pages to map
- *
- * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should
- * have been allocated using get_vm_area() and its friends.
- *
- * NOTE:
- * This function does NOT do any cache flushing. The caller is responsible for
- * calling flush_cache_vmap() on to-be-mapped areas before calling this
- * function.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int map_kernel_range_noflush(unsigned long addr, unsigned long size,
-			     pgprot_t prot, struct page **pages)
-{
-	return vmap_pages_range_noflush(addr, addr + size, prot, pages, PAGE_SHIFT);
-}
-
-int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
-		struct page **pages)
-{
-	int ret;
-
-	ret = map_kernel_range_noflush(start, size, prot, pages);
-	flush_cache_vmap(start, start + size);
-	return ret;
-}
-
 int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
@@ -2156,10 +2143,12 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
 
 	kasan_unpoison_vmalloc(mem, size);
 
-	if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
+	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
+				pages, PAGE_SHIFT) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
 	}
+
 	return mem;
 }
 EXPORT_SYMBOL(vm_map_ram);
@@ -2703,6 +2692,7 @@ void *vmap(struct page **pages, unsigned int count,
 	   unsigned long flags, pgprot_t prot)
 {
 	struct vm_struct *area;
+	unsigned long addr;
 	unsigned long size;		/* In bytes */
 
 	might_sleep();
@@ -2715,8 +2705,9 @@ void *vmap(struct page **pages, unsigned int count,
 	if (!area)
 		return NULL;
 
-	if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
-				pages) < 0) {
+	addr = (unsigned long)area->addr;
+	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
+				pages, PAGE_SHIFT) < 0) {
 		vunmap(area->addr);
 		return NULL;
 	}
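
No exported interface changes here: vmap() users are untouched by the
conversion. A minimal usage sketch (pages is assumed to be an array of
count struct page pointers the caller already holds):

	void *buf = vmap(pages, count, VM_MAP, PAGE_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... access the pages through buf ... */
	vunmap(buf);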