diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3388ccbab7d6..3d7ab30d4940 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -140,6 +140,9 @@ enum zone_stat_item {
 	NR_DIRTIED,		/* page dirtyings since bootup */
 	NR_WRITTEN,		/* page writings since bootup */
 	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
+#if IS_ENABLED(CONFIG_ZSMALLOC)
+	NR_ZSPAGES,		/* allocated in zsmalloc */
+#endif
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
diff --git a/mm/vmstat.c b/mm/vmstat.c
index cb2a67bb4158..2a0f26bdae39 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -718,7 +718,9 @@ const char * const vmstat_text[] = {
 	"nr_dirtied",
 	"nr_written",
 	"nr_pages_scanned",
-
+#if IS_ENABLED(CONFIG_ZSMALLOC)
+	"nr_zspages",
+#endif
 #ifdef CONFIG_NUMA
 	"numa_hit",
 	"numa_miss",
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6b6986a02aa0..e4e8081b160b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1007,6 +1007,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 		next = get_next_page(page);
 		reset_page(page);
 		unlock_page(page);
+		dec_zone_page_state(page, NR_ZSPAGES);
 		put_page(page);
 		page = next;
 	} while (page != NULL);
@@ -1137,11 +1138,15 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 
 		page = alloc_page(gfp);
 		if (!page) {
-			while (--i >= 0)
+			while (--i >= 0) {
+				dec_zone_page_state(pages[i], NR_ZSPAGES);
 				__free_page(pages[i]);
+			}
 			cache_free_zspage(pool, zspage);
 			return NULL;
 		}
+
+		inc_zone_page_state(page, NR_ZSPAGES);
 		pages[i] = page;
 	}
 
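
For context, not part of the patch: a minimal sketch of the accounting pattern the zsmalloc hunks above follow, assuming the zone-based vmstat API of this kernel generation (inc_zone_page_state()/dec_zone_page_state() taking a struct page and a zone_stat_item). The helper names example_get_page()/example_put_page() are hypothetical; the point is only that the counter is bumped when a page is handed to the user of the pool and dropped before the page is returned to the page allocator.

	/*
	 * Illustrative sketch only: keep a zone_stat_item such as
	 * NR_ZSPAGES in sync with page allocation and freeing,
	 * mirroring alloc_zspage() and __free_zspage() above.
	 */
	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/vmstat.h>

	static struct page *example_get_page(gfp_t gfp)
	{
		struct page *page = alloc_page(gfp);

		if (!page)
			return NULL;

		/* Account the page against the per-zone counter. */
		inc_zone_page_state(page, NR_ZSPAGES);
		return page;
	}

	static void example_put_page(struct page *page)
	{
		/* Undo the accounting before the page is freed. */
		dec_zone_page_state(page, NR_ZSPAGES);
		__free_page(page);
	}

With the patch applied and CONFIG_ZSMALLOC enabled, the counter is exported as "nr_zspages" via the vmstat_text[] entry added in mm/vmstat.c, so the alloc_zspage() and __free_zspage() hunks are the only places that need to update it.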