mm/swapfile.c: fix comment typos

Signed-off-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Authored by Seth Jennings on 2013-11-12 15:07:46 -08:00; committed by Linus Torvalds
parent 7f88f88f83
commit 2de1a7e40a
1 changed file with 7 additions and 7 deletions

mm/swapfile.c

@@ -707,7 +707,7 @@ noswap:
 	return (swp_entry_t) {0};
 }
 
-/* The only caller of this function is now susupend routine */
+/* The only caller of this function is now suspend routine */
 swp_entry_t get_swap_page_of_type(int type)
 {
 	struct swap_info_struct *si;
@@ -845,7 +845,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
 }
 
 /*
- * Caller has made sure that the swapdevice corresponding to entry
+ * Caller has made sure that the swap device corresponding to entry
  * is still around or has not been recycled.
  */
 void swap_free(swp_entry_t entry)
@@ -947,7 +947,7 @@ int try_to_free_swap(struct page *page)
 	 * original page might be freed under memory pressure, then
 	 * later read back in from swap, now with the wrong data.
 	 *
-	 * Hibration suspends storage while it is writing the image
+	 * Hibernation suspends storage while it is writing the image
 	 * to disk so check that here.
 	 */
 	if (pm_suspended_storage())
@@ -1179,7 +1179,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
 	 * of unmatched parts which look like swp_pte, so unuse_pte must
 	 * recheck under pte lock. Scanning without pte lock lets it be
-	 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
+	 * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
 	 */
 	pte = pte_offset_map(pmd, addr);
 	do {
@@ -1934,7 +1934,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	vfree(swap_map);
 	vfree(cluster_info);
 	vfree(frontswap_map);
-	/* Destroy swap account informatin */
+	/* Destroy swap account information */
 	swap_cgroup_swapoff(type);
 
 	inode = mapping->host;
@@ -2786,8 +2786,8 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
 
 	/*
 	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
-	 * no architecture is using highmem pages for kernel pagetables: so it
-	 * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
+	 * no architecture is using highmem pages for kernel page tables: so it
+	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
 	 */
 	head = vmalloc_to_page(si->swap_map + offset);
 	offset &= ~PAGE_MASK;