zsmalloc: cosmetic compaction code adjustments

Change zs_object_copy() argument order to be (DST, SRC) rather than
(SRC, DST).  copy/move functions usually have (to, from) arguments
order.

Rename alloc_target_page() to isolate_target_page().  This function
doesn't allocate anything; it isolates the target page, pretty much like
isolate_source_page().

Tweak __zs_compact() comment.

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Sergey Senozhatsky 2015-09-08 15:04:33 -07:00 committed by Linus Torvalds
parent 04f05909e0
commit 0dc63d488a
1 changed file with 6 additions and 6 deletions

View File

@ -1471,7 +1471,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
}
EXPORT_SYMBOL_GPL(zs_free);
static void zs_object_copy(unsigned long src, unsigned long dst,
static void zs_object_copy(unsigned long dst, unsigned long src,
struct size_class *class)
{
struct page *s_page, *d_page;
@ -1612,7 +1612,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
used_obj = handle_to_obj(handle);
free_obj = obj_malloc(d_page, class, handle);
zs_object_copy(used_obj, free_obj, class);
zs_object_copy(free_obj, used_obj, class);
index++;
record_obj(handle, free_obj);
unpin_tag(handle);
@ -1628,7 +1628,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
return ret;
}
static struct page *alloc_target_page(struct size_class *class)
static struct page *isolate_target_page(struct size_class *class)
{
int i;
struct page *page;
@ -1718,11 +1718,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
cc.index = 0;
cc.s_page = src_page;
while ((dst_page = alloc_target_page(class))) {
while ((dst_page = isolate_target_page(class))) {
cc.d_page = dst_page;
/*
* If there is no more space in dst_page, try to
* allocate another zspage.
* If there is no more space in dst_page, resched
* and see if anyone had allocated another zspage.
*/
if (!migrate_zspage(pool, class, &cc))
break;