mm: eliminate "expecting prototype" kernel-doc warnings

Fix stray kernel-doc warnings in mm/ due to mis-typed or missing function
names.

Quietens these kernel-doc warnings:

  mm/mmu_gather.c:264: warning: expecting prototype for tlb_gather_mmu(). Prototype was for __tlb_gather_mmu() instead
  mm/oom_kill.c:180: warning: expecting prototype for Check whether unreclaimable slab amount is greater than(). Prototype was for should_dump_unreclaim_slab() instead
  mm/shuffle.c:155: warning: expecting prototype for shuffle_free_memory(). Prototype was for __shuffle_free_memory() instead

Link: https://lkml.kernel.org/r/20210411210642.11362-1-rdunlap@infradead.org
Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 845be1cd34 (parent 06c2aac401)
Randy Dunlap, 2021-04-16 15:45:54 -07:00; committed by Linus Torvalds
3 changed files with 22 additions and 13 deletions
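For context: scripts/kernel-doc treats any comment opened with the "/**"
marker as documentation and expects the name on its first line to match the
definition that immediately follows; a mismatch produces the "expecting
prototype for X(). Prototype was for Y() instead" warning quoted above.
Below is a minimal sketch of the two comment styles involved, using
hypothetical names (struct widget, frob_widget) rather than anything from
this patch:

#include <stdbool.h>

struct widget {
	int frob_level;
};

/*
 * Plain comment, single leading asterisk: scripts/kernel-doc skips it, so
 * it may describe this helper freely without triggering a prototype check.
 */
static void __frob_widget(struct widget *w, bool full)
{
	w->frob_level = full ? 100 : w->frob_level + 1;
}

/**
 * frob_widget - raise a widget's frobnication level by one step
 * @w: the widget to adjust
 *
 * A kernel-doc comment: the name on the first line must match the function
 * that immediately follows, otherwise scripts/kernel-doc emits the
 * "expecting prototype" warning quoted in the commit message.
 */
void frob_widget(struct widget *w)
{
	__frob_widget(w, false);
}

The hunks below apply the same two remedies: in mm/mmu_gather.c the
kernel-doc block is moved from the internal __tlb_gather_mmu() helper onto
the exported tlb_gather_mmu() and tlb_gather_mmu_fullmm() wrappers it
actually describes, while in mm/oom_kill.c and mm/shuffle.c the comments on
internal helpers are demoted from "/**" to plain "/*" (and, in shuffle.c,
renamed to match __shuffle_free_memory()).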

--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -249,16 +249,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_flush_mmu_free(tlb);
 }
 
-/**
- * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
- * @tlb: the mmu_gather structure to initialize
- * @mm: the mm_struct of the target address space
- * @fullmm: @mm is without users and we're going to destroy the full address
- *	space (exit/execve)
- *
- * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm.
- */
 static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			     bool fullmm)
 {
@@ -283,11 +273,30 @@ static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 	inc_tlb_flush_pending(tlb->mm);
 }
 
+/**
+ * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm.
+ */
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
 {
 	__tlb_gather_mmu(tlb, mm, false);
 }
 
+/**
+ * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ *
+ * In this case, @mm is without users and we're going to destroy the
+ * full address space (exit/execve).
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm.
+ */
 void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
 {
 	__tlb_gather_mmu(tlb, mm, true);

--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -170,7 +170,7 @@ static bool oom_unkillable_task(struct task_struct *p)
 	return false;
 }
 
-/**
+/*
  * Check whether unreclaimable slab amount is greater than
  * all user memory(LRU pages).
  * dump_unreclaimable_slab() could help in the case that

--- a/mm/shuffle.c
+++ b/mm/shuffle.c
@@ -147,8 +147,8 @@ void __meminit __shuffle_zone(struct zone *z)
 	spin_unlock_irqrestore(&z->lock, flags);
 }
 
-/**
- * shuffle_free_memory - reduce the predictability of the page allocator
+/*
+ * __shuffle_free_memory - reduce the predictability of the page allocator
  * @pgdat: node page data
  */
 void __meminit __shuffle_free_memory(pg_data_t *pgdat)