zsmalloc: do not scan for allocated objects in empty zspage
Patch series "zsmalloc: small compaction improvements", v2. A tiny series that can reduce the number of find_alloced_obj() invocations (which perform a linear scan of sub-page) during compaction. Inspired by Alexey Romanov's findings. This patch (of 3): zspage migration can terminate as soon as it moves the last allocated object from the source zspage. Add a simple helper zspage_empty() that tests zspage ->inuse on each migration iteration. Link: https://lkml.kernel.org/r/20230624053120.643409-2-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org> Suggested-by: Alexey Romanov <AVRomanov@sberdevices.ru> Reviewed-by: Alexey Romanov <avromanov@sberdevices.ru> Acked-by: Minchan Kim <minchan@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
3fade62b62
commit
df9cd3cbf2
|
@ -1147,6 +1147,11 @@ static bool zspage_full(struct size_class *class, struct zspage *zspage)
|
|||
return get_zspage_inuse(zspage) == class->objs_per_zspage;
|
||||
}
|
||||
|
||||
/* Return true when the zspage holds no allocated objects (inuse count is zero). */
static bool zspage_empty(struct zspage *zspage)
{
	return !get_zspage_inuse(zspage);
}
|
||||
|
||||
/**
|
||||
* zs_lookup_class_index() - Returns index of the zsmalloc &size_class
|
||||
* that hold objects of the provided size.
|
||||
|
@ -1625,6 +1630,10 @@ static void migrate_zspage(struct zs_pool *pool, struct size_class *class,
|
|||
obj_idx++;
|
||||
record_obj(handle, free_obj);
|
||||
obj_free(class->size, used_obj);
|
||||
|
||||
/* Stop if there are no more objects to migrate */
|
||||
if (zspage_empty(get_zspage(s_page)))
|
||||
break;
|
||||
}
|
||||
|
||||
/* Remember last position in this iteration */
|
||||
|
|
Loading…
Reference in New Issue