mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-28 13:22:57 +00:00
mm/swapfile.c: use __try_to_reclaim_swap() in free_swap_and_cache()
The code path to reclaim the swap entry in free_swap_and_cache() is almost same as that of __try_to_reclaim_swap(). The largest difference is just coding style. So the support to the additional requirement of free_swap_and_cache() is added into __try_to_reclaim_swap(). free_swap_and_cache() is changed to call __try_to_reclaim_swap(), and delete the duplicated code. This will improve code readability and reduce the potential bugs. There are 2 functionality differences between __try_to_reclaim_swap() and swap entry reclaim code of free_swap_and_cache(). - free_swap_and_cache() only reclaims the swap entry if the page is unmapped or swap is getting full. The support has been added into __try_to_reclaim_swap(). - try_to_free_swap() (called by __try_to_reclaim_swap()) checks pm_suspended_storage(), while free_swap_and_cache() not. I think this is OK. Because the page and the swap entry can be reclaimed later eventually. Link: http://lkml.kernel.org/r/20180827075535.17406-2-ying.huang@intel.com Signed-off-by: "Huang, Ying" <ying.huang@intel.com> Reviewed-by: Andrew Morton <akpm@linux-foundation.org> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Shaohua Li <shli@kernel.org> Cc: Hugh Dickins <hughd@google.com> Cc: Minchan Kim <minchan@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
154221c3e5
commit
bcd49e8671
1 changed file with 25 additions and 32 deletions
|
@ -103,26 +103,39 @@ static inline unsigned char swap_count(unsigned char ent)
|
||||||
return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
|
return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Reclaim the swap entry anyway if possible */
|
||||||
|
#define TTRS_ANYWAY 0x1
|
||||||
|
/*
|
||||||
|
* Reclaim the swap entry if there are no more mappings of the
|
||||||
|
* corresponding page
|
||||||
|
*/
|
||||||
|
#define TTRS_UNMAPPED 0x2
|
||||||
|
/* Reclaim the swap entry if swap is getting full*/
|
||||||
|
#define TTRS_FULL 0x4
|
||||||
|
|
||||||
/* returns 1 if swap entry is freed */
|
/* returns 1 if swap entry is freed */
|
||||||
static int
|
static int __try_to_reclaim_swap(struct swap_info_struct *si,
|
||||||
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
|
unsigned long offset, unsigned long flags)
|
||||||
{
|
{
|
||||||
swp_entry_t entry = swp_entry(si->type, offset);
|
swp_entry_t entry = swp_entry(si->type, offset);
|
||||||
struct page *page;
|
struct page *page;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
page = find_get_page(swap_address_space(entry), swp_offset(entry));
|
page = find_get_page(swap_address_space(entry), offset);
|
||||||
if (!page)
|
if (!page)
|
||||||
return 0;
|
return 0;
|
||||||
/*
|
/*
|
||||||
* This function is called from scan_swap_map() and it's called
|
* When this function is called from scan_swap_map_slots() and it's
|
||||||
* by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
|
* called by vmscan.c at reclaiming pages. So, we hold a lock on a page,
|
||||||
* We have to use trylock for avoiding deadlock. This is a special
|
* here. We have to use trylock for avoiding deadlock. This is a special
|
||||||
* case and you should use try_to_free_swap() with explicit lock_page()
|
* case and you should use try_to_free_swap() with explicit lock_page()
|
||||||
* in usual operations.
|
* in usual operations.
|
||||||
*/
|
*/
|
||||||
if (trylock_page(page)) {
|
if (trylock_page(page)) {
|
||||||
ret = try_to_free_swap(page);
|
if ((flags & TTRS_ANYWAY) ||
|
||||||
|
((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
|
||||||
|
((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
|
||||||
|
ret = try_to_free_swap(page);
|
||||||
unlock_page(page);
|
unlock_page(page);
|
||||||
}
|
}
|
||||||
put_page(page);
|
put_page(page);
|
||||||
|
@ -780,7 +793,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
|
||||||
int swap_was_freed;
|
int swap_was_freed;
|
||||||
unlock_cluster(ci);
|
unlock_cluster(ci);
|
||||||
spin_unlock(&si->lock);
|
spin_unlock(&si->lock);
|
||||||
swap_was_freed = __try_to_reclaim_swap(si, offset);
|
swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
|
||||||
spin_lock(&si->lock);
|
spin_lock(&si->lock);
|
||||||
/* entry was freed successfully, try to use this again */
|
/* entry was freed successfully, try to use this again */
|
||||||
if (swap_was_freed)
|
if (swap_was_freed)
|
||||||
|
@ -1612,7 +1625,6 @@ int try_to_free_swap(struct page *page)
|
||||||
int free_swap_and_cache(swp_entry_t entry)
|
int free_swap_and_cache(swp_entry_t entry)
|
||||||
{
|
{
|
||||||
struct swap_info_struct *p;
|
struct swap_info_struct *p;
|
||||||
struct page *page = NULL;
|
|
||||||
unsigned char count;
|
unsigned char count;
|
||||||
|
|
||||||
if (non_swap_entry(entry))
|
if (non_swap_entry(entry))
|
||||||
|
@ -1622,31 +1634,12 @@ int free_swap_and_cache(swp_entry_t entry)
|
||||||
if (p) {
|
if (p) {
|
||||||
count = __swap_entry_free(p, entry, 1);
|
count = __swap_entry_free(p, entry, 1);
|
||||||
if (count == SWAP_HAS_CACHE &&
|
if (count == SWAP_HAS_CACHE &&
|
||||||
!swap_page_trans_huge_swapped(p, entry)) {
|
!swap_page_trans_huge_swapped(p, entry))
|
||||||
page = find_get_page(swap_address_space(entry),
|
__try_to_reclaim_swap(p, swp_offset(entry),
|
||||||
swp_offset(entry));
|
TTRS_UNMAPPED | TTRS_FULL);
|
||||||
if (page && !trylock_page(page)) {
|
else if (!count)
|
||||||
put_page(page);
|
|
||||||
page = NULL;
|
|
||||||
}
|
|
||||||
} else if (!count)
|
|
||||||
free_swap_slot(entry);
|
free_swap_slot(entry);
|
||||||
}
|
}
|
||||||
if (page) {
|
|
||||||
/*
|
|
||||||
* Not mapped elsewhere, or swap space full? Free it!
|
|
||||||
* Also recheck PageSwapCache now page is locked (above).
|
|
||||||
*/
|
|
||||||
if (PageSwapCache(page) && !PageWriteback(page) &&
|
|
||||||
(!page_mapped(page) || mem_cgroup_swap_full(page)) &&
|
|
||||||
!swap_page_trans_huge_swapped(p, entry)) {
|
|
||||||
page = compound_head(page);
|
|
||||||
delete_from_swap_cache(page);
|
|
||||||
SetPageDirty(page);
|
|
||||||
}
|
|
||||||
unlock_page(page);
|
|
||||||
put_page(page);
|
|
||||||
}
|
|
||||||
return p != NULL;
|
return p != NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue