f2fs: compress: introduce page array slab cache

Add a per-sbi slab cache "f2fs_page_array_entry-%u:%u" for allocating the
page pointer arrays used in the compress context.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
[Jaegeuk Kim: Fix wrong memory allocation]
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Author:    Chao Yu <yuchao0@huawei.com>
Date:      2020-09-14 17:05:13 +08:00
Committer: Jaegeuk Kim <jaegeuk@kernel.org>
Commit:    3108303170 (parent 3a22e9ac71)
3 changed files with 102 additions and 31 deletions
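
Every converted call site follows the same pattern: the open-coded
f2fs_kzalloc() of a page pointer array becomes page_array_alloc(), and the
matching kfree() becomes page_array_free(), which takes the element count so
the helper can tell whether the buffer came from the per-sbi slab or from the
f2fs_kzalloc() fallback used for requests larger than the slab object. With
the default compress_log_size of 2 (an assumption about the mount options,
not stated by the patch), each array holds four page pointers, i.e. a 32-byte
slab object on a 64-bit build. A condensed before/after sketch, with
identifiers as in the patch:

	/* before: open-coded heap allocation, freed with kfree() */
	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
					cc->log_cluster_size, GFP_NOFS);
	...
	kfree(cc->rpages);

	/*
	 * after: arrays that fit the per-sbi slab object come from the
	 * "f2fs_page_array_entry-%u:%u" cache; larger requests still fall
	 * back to f2fs_kzalloc()/kfree() inside the helpers.
	 */
	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	...
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);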

fs/f2fs/compress.c

@ -17,6 +17,30 @@
#include "node.h"
#include <trace/events/f2fs.h>
static void *page_array_alloc(struct inode *inode, int nr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int size = sizeof(struct page *) * nr;
if (likely(size <= sbi->page_array_slab_size))
return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
return f2fs_kzalloc(sbi, size, GFP_NOFS);
}
static void page_array_free(struct inode *inode, void *pages, int nr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int size = sizeof(struct page *) * nr;
if (!pages)
return;
if (likely(size <= sbi->page_array_slab_size))
kmem_cache_free(sbi->page_array_slab, pages);
else
kfree(pages);
}
struct f2fs_compress_ops {
int (*init_compress_ctx)(struct compress_ctx *cc);
void (*destroy_compress_ctx)(struct compress_ctx *cc);
@ -130,19 +154,16 @@ struct page *f2fs_compress_control_page(struct page *page)
int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
if (cc->nr_rpages)
return 0;
cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
cc->log_cluster_size, GFP_NOFS);
cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
return cc->rpages ? 0 : -ENOMEM;
}
void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
kfree(cc->rpages);
page_array_free(cc->inode, cc->rpages, cc->cluster_size);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
@ -573,11 +594,11 @@ static void *f2fs_vmap(struct page **pages, unsigned int count)
static int f2fs_compress_pages(struct compress_ctx *cc)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
unsigned int max_len, nr_cpages;
unsigned int max_len, new_nr_cpages;
struct page **new_cpages;
int i, ret;
trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
@ -592,8 +613,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
cc->nr_cpages, GFP_NOFS);
cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
if (!cc->cpages) {
ret = -ENOMEM;
goto destroy_compress_ctx;
@ -635,16 +655,28 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
cc->cbuf->reserved[i] = cpu_to_le32(0);
nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
/* Now we're going to cut unnecessary tail pages */
new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
if (!new_cpages) {
ret = -ENOMEM;
goto out_vunmap_cbuf;
}
/* zero out any unused part of the last page */
memset(&cc->cbuf->cdata[cc->clen], 0,
(nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
(new_nr_cpages * PAGE_SIZE) -
(cc->clen + COMPRESS_HEADER_SIZE));
vm_unmap_ram(cc->cbuf, cc->nr_cpages);
vm_unmap_ram(cc->rbuf, cc->cluster_size);
for (i = nr_cpages; i < cc->nr_cpages; i++) {
for (i = 0; i < cc->nr_cpages; i++) {
if (i < new_nr_cpages) {
new_cpages[i] = cc->cpages[i];
continue;
}
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@ -652,7 +684,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);
cc->nr_cpages = nr_cpages;
page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = new_cpages;
cc->nr_cpages = new_nr_cpages;
trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
cc->clen, ret);
@ -667,7 +701,7 @@ out_free_cpages:
if (cc->cpages[i])
f2fs_compress_free_page(cc->cpages[i]);
}
kfree(cc->cpages);
page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
destroy_compress_ctx:
if (cops->destroy_compress_ctx)
@ -706,8 +740,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
goto out_free_dic;
}
dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
dic->cluster_size, GFP_NOFS);
dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
if (!dic->tpages) {
ret = -ENOMEM;
goto out_free_dic;
@ -1046,6 +1079,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
{
struct compress_ctx cc = {
.inode = inode,
.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
.cluster_size = F2FS_I(inode)->i_cluster_size,
.rpages = fsdata,
@ -1149,7 +1183,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
*/
down_read(&sbi->node_write);
} else if (!f2fs_trylock_op(sbi)) {
return -EAGAIN;
goto out_free;
}
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
@ -1179,8 +1213,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
atomic_set(&cic->pending_pages, cc->nr_cpages);
cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
cc->log_cluster_size, GFP_NOFS);
cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;
@ -1274,11 +1307,13 @@ unlock_continue:
spin_unlock(&fi->i_size_lock);
f2fs_put_rpages(cc);
page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
f2fs_destroy_compress_ctx(cc);
return 0;
out_destroy_crypt:
kfree(cic->rpages);
page_array_free(cc->inode, cic->rpages, cc->cluster_size);
for (--i; i >= 0; i--)
fscrypt_finalize_bounce_page(&cc->cpages[i]);
@ -1296,6 +1331,9 @@ out_unlock_op:
up_read(&sbi->node_write);
else
f2fs_unlock_op(sbi);
out_free:
page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
return -EAGAIN;
}
@ -1322,7 +1360,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
end_page_writeback(cic->rpages[i]);
}
kfree(cic->rpages);
page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
kfree(cic);
}
@ -1419,8 +1457,6 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
err = f2fs_write_compressed_pages(cc, submitted,
wbc, io_type);
kfree(cc->cpages);
cc->cpages = NULL;
if (!err)
return 0;
f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
@ -1446,8 +1482,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
if (!dic)
return ERR_PTR(-ENOMEM);
dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
cc->log_cluster_size, GFP_NOFS);
dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!dic->rpages) {
kfree(dic);
return ERR_PTR(-ENOMEM);
@ -1466,8 +1501,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->rpages[i] = cc->rpages[i];
dic->nr_rpages = cc->cluster_size;
dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
dic->nr_cpages, GFP_NOFS);
dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
if (!dic->cpages)
goto out_free;
@ -1502,7 +1536,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
kfree(dic->tpages);
page_array_free(dic->inode, dic->tpages, dic->cluster_size);
}
if (dic->cpages) {
@ -1511,10 +1545,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
kfree(dic->cpages);
page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
}
kfree(dic->rpages);
page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
kfree(dic);
}
@ -1543,3 +1577,25 @@ unlock:
unlock_page(rpage);
}
}
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
dev_t dev = sbi->sb->s_bdev->bd_dev;
char slab_name[32];
sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
sbi->page_array_slab_size = sizeof(struct page *) <<
F2FS_OPTION(sbi).compress_log_size;
sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
sbi->page_array_slab_size);
if (!sbi->page_array_slab)
return -ENOMEM;
return 0;
}
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
kmem_cache_destroy(sbi->page_array_slab);
}

fs/f2fs/f2fs.h

@ -1622,6 +1622,11 @@ struct f2fs_sb_info {
struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct kmem_cache *page_array_slab; /* page array entry */
unsigned int page_array_slab_size; /* default page array slab size */
#endif
};
struct f2fs_private_dio {
@ -3929,6 +3934,8 @@ void f2fs_decompress_end_io(struct page **rpages,
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
@ -3945,6 +3952,8 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
#endif
static inline void set_compress_context(struct inode *inode)

fs/f2fs/super.c

@ -1288,6 +1288,7 @@ static void f2fs_put_super(struct super_block *sb)
kfree(sbi->raw_super);
destroy_device_list(sbi);
f2fs_destroy_page_array_cache(sbi);
f2fs_destroy_xattr_caches(sbi);
mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
@ -3620,13 +3621,16 @@ try_onemore:
err = f2fs_init_xattr_caches(sbi);
if (err)
goto free_io_dummy;
err = f2fs_init_page_array_cache(sbi);
if (err)
goto free_xattr_cache;
/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
if (IS_ERR(sbi->meta_inode)) {
f2fs_err(sbi, "Failed to read F2FS meta data inode");
err = PTR_ERR(sbi->meta_inode);
goto free_xattr_cache;
goto free_page_array_cache;
}
err = f2fs_get_valid_checkpoint(sbi);
@ -3902,6 +3906,8 @@ free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
sbi->meta_inode = NULL;
free_page_array_cache:
f2fs_destroy_page_array_cache(sbi);
free_xattr_cache:
f2fs_destroy_xattr_caches(sbi);
free_io_dummy: