mm/swap: add helper swap_offset_available()

Add the helper swap_offset_available() to remove some duplicated code in
scan_swap_map_slots().  This is a minor readability improvement.

[akpm@linux-foundation.org: s/swap_offset_available/swap_offset_available_and_locked/, per Neil]
Link: https://lkml.kernel.org/r/20220509131416.17553-12-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: NeilBrown <neilb@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
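
Note on the helper's contract (a sketch inferred from the diff below, not
part of the commit message itself): swap_offset_available_and_locked()
returns true with si->lock held when the slot at `offset` is free, or when
swap is nearly full and the slot holds only reclaimable swap cache
(SWAP_HAS_CACHE); it returns false without taking the lock otherwise.  The
simplified calling pattern in the scan loops looks like:

	/*
	 * Simplified scan-loop usage, taken from the hunks below.
	 * On a true return si->lock is held, so the loop can jump
	 * straight to the "checks:" label with the lock taken.
	 */
	while (++offset <= READ_ONCE(si->highest_bit)) {
		if (swap_offset_available_and_locked(si, offset))
			goto checks;	/* si->lock held here */
		if (unlikely(--latency_ration < 0)) {
			cond_resched();	/* bound scan latency */
			latency_ration = LATENCY_LIMIT;
		}
	}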

diff --git a/mm/swapfile.c b/mm/swapfile.c
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -775,6 +775,22 @@ static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
 	this_cpu_write(*si->cluster_next_cpu, next);
 }
 
+static bool swap_offset_available_and_locked(struct swap_info_struct *si,
+					     unsigned long offset)
+{
+	if (data_race(!si->swap_map[offset])) {
+		spin_lock(&si->lock);
+		return true;
+	}
+
+	if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
+		spin_lock(&si->lock);
+		return true;
+	}
+
+	return false;
+}
+
 static int scan_swap_map_slots(struct swap_info_struct *si,
 			       unsigned char usage, int nr,
 			       swp_entry_t slots[])
@@ -952,15 +968,8 @@ done:
 scan:
 	spin_unlock(&si->lock);
 	while (++offset <= READ_ONCE(si->highest_bit)) {
-		if (data_race(!si->swap_map[offset])) {
-			spin_lock(&si->lock);
+		if (swap_offset_available_and_locked(si, offset))
 			goto checks;
-		}
-		if (vm_swap_full() &&
-		    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
-			spin_lock(&si->lock);
-			goto checks;
-		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
@@ -969,15 +978,8 @@ scan:
 	}
 	offset = si->lowest_bit;
 	while (offset < scan_base) {
-		if (data_race(!si->swap_map[offset])) {
-			spin_lock(&si->lock);
+		if (swap_offset_available_and_locked(si, offset))
 			goto checks;
-		}
-		if (vm_swap_full() &&
-		    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
-			spin_lock(&si->lock);
-			goto checks;
-		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;