erofs: use meta buffers for zmap operations

Get rid of the old erofs_get_meta_page() within zmap operations by
using on-stack meta buffers in order to prepare for the subpage and
folio features.

With that, erofs_get_meta_page() has no users left. Get rid of it!

Link: https://lore.kernel.org/r/20220102040017.51352-6-hsiangkao@linux.alibaba.com
Reviewed-by: Yue Hu <huyue2@yulong.com>
Reviewed-by: Liu Bo <bo.liu@linux.alibaba.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
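
For readers new to the meta-buffer API: the pattern this series converts
callers to keeps a small struct erofs_buf on the stack, reads one metadata
block through it (optionally mapped), and releases it afterwards. A minimal
sketch of that calling convention, using only the helpers declared in
fs/erofs/internal.h below (error paths trimmed; illustration, not code from
this patch):

	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;

	/* read block blkaddr and map it with kmap_atomic() */
	kaddr = erofs_read_metabuf(&buf, sb, blkaddr, EROFS_KMAP_ATOMIC);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);
	/* ... parse on-disk metadata through kaddr ... */
	erofs_put_metabuf(&buf);	/* unmap + drop the page reference */

As the z_erofs_reload_indexes() change below shows, repeated reads may reuse
the same buffer; the helper takes over the page caching, locking and
refcounting that erofs_get_meta_page() callers used to do by hand.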
Author: Gao Xiang <hsiangkao@linux.alibaba.com>
Date:   2022-01-02 12:00:17 +08:00
Commit: 09c543798c (parent: bb88e8da00)

4 changed files with 28 additions and 70 deletions

diff --git a/fs/erofs/data.c b/fs/erofs/data.c
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -9,19 +9,6 @@
 #include <linux/dax.h>
 #include <trace/events/erofs.h>
 
-struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
-{
-	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
-	struct page *page;
-
-	page = read_cache_page_gfp(mapping, blkaddr,
-				   mapping_gfp_constraint(mapping, ~__GFP_FS));
-	/* should already be PageUptodate */
-	if (!IS_ERR(page))
-		lock_page(page);
-	return page;
-}
-
 void erofs_unmap_metabuf(struct erofs_buf *buf)
 {
 	if (buf->kmap_type == EROFS_KMAP)
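
With this removal, the meta-buffer helpers introduced earlier in the series
are the only way to read metadata blocks. For reference, a paraphrased
sketch of how erofs_read_metabuf() covers the removed helper's job — an
approximation based on the declarations shown below, not code from this
patch, and the real implementation may differ in detail:

	void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
				 erofs_blk_t blkaddr, enum erofs_kmap_type type)
	{
		struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
		erofs_off_t offset = blknr_to_addr(blkaddr);
		pgoff_t index = offset >> PAGE_SHIFT;
		struct page *page = buf->page;

		if (!page || page->index != index) {
			erofs_put_metabuf(buf);	/* unmap + drop any old page */
			page = read_cache_page_gfp(mapping, index,
					mapping_gfp_constraint(mapping, ~__GFP_FS));
			if (IS_ERR(page))
				return page;
			/* should already be PageUptodate, no lock_page() needed */
			buf->page = page;
		}
		/* simplified: the real helper tracks kmap_type to balance kunmap */
		if (type == EROFS_KMAP)
			buf->base = kmap(page);
		else if (type == EROFS_KMAP_ATOMIC)
			buf->base = kmap_atomic(page);
		buf->kmap_type = type;
		if (type == EROFS_NO_KMAP)
			return NULL;
		return buf->base + erofs_blkoff(offset);
	}

Note how the "should already be PageUptodate" assumption survives, while the
lock_page()/unlock_page() round-trip that erofs_get_meta_page() imposed on
every caller disappears.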

diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -419,14 +419,14 @@ enum {
 #define EROFS_MAP_FULL_MAPPED	(1 << BH_FullMapped)
 
 struct erofs_map_blocks {
+	struct erofs_buf buf;
+
 	erofs_off_t m_pa, m_la;
 	u64 m_plen, m_llen;
 
 	unsigned short m_deviceid;
 	char m_algorithmformat;
 	unsigned int m_flags;
-
-	struct page *mpage;
 };
 
 /* Flags used by erofs_map_blocks_flatmode() */
@@ -474,7 +474,7 @@ struct erofs_map_dev {
 /* data.c */
 extern const struct file_operations erofs_file_fops;
 
-struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr);
 void erofs_unmap_metabuf(struct erofs_buf *buf);
 void erofs_put_metabuf(struct erofs_buf *buf);
 void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
 			 erofs_blk_t blkaddr, enum erofs_kmap_type type);

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -698,20 +698,18 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 		goto err_out;
 
 	if (z_erofs_is_inline_pcluster(clt->pcl)) {
-		struct page *mpage;
+		void *mp;
 
-		mpage = erofs_get_meta_page(inode->i_sb,
-					    erofs_blknr(map->m_pa));
-		if (IS_ERR(mpage)) {
-			err = PTR_ERR(mpage);
+		mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
+					erofs_blknr(map->m_pa), EROFS_NO_KMAP);
+		if (IS_ERR(mp)) {
+			err = PTR_ERR(mp);
 			erofs_err(inode->i_sb,
 				  "failed to get inline page, err %d", err);
 			goto err_out;
 		}
-		/* TODO: new subpage feature will get rid of it */
-		unlock_page(mpage);
-		WRITE_ONCE(clt->pcl->compressed_pages[0], mpage);
+		get_page(fe->map.buf.page);
+		WRITE_ONCE(clt->pcl->compressed_pages[0], fe->map.buf.page);
 		clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
 	} else {
 		/* preload all compressed pages (can change mode if needed) */
@@ -1529,9 +1527,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
 	if (err)
 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
 
-	if (f.map.mpage)
-		put_page(f.map.mpage);
-
+	erofs_put_metabuf(&f.map.buf);
 	erofs_release_pages(&pagepool);
 	return err;
 }
@@ -1576,8 +1572,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 	z_erofs_runqueue(inode->i_sb, &f, &pagepool,
 			 z_erofs_get_sync_decompress_policy(sbi, nr_pages));
 
-	if (f.map.mpage)
-		put_page(f.map.mpage);
-
+	erofs_put_metabuf(&f.map.buf);
 	erofs_release_pages(&pagepool);
 }
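
Two details in the z_erofs_do_read_page() hunk are worth spelling out (my
reading of the diff, not wording from the patch): the inline pcluster path
only stashes the page itself, never a mapping of its contents, hence
EROFS_NO_KMAP; and the buffer's own page reference is dropped whenever the
buffer is reused or released, so the page stored in compressed_pages[0]
needs a reference of its own:

	/* EROFS_NO_KMAP: only the page pointer is needed, not its contents */
	mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
				erofs_blknr(map->m_pa), EROFS_NO_KMAP);
	...
	/*
	 * fe->map.buf holds one reference that erofs_put_metabuf() (or a
	 * later erofs_read_metabuf() on another block) will drop, while
	 * compressed_pages[0] outlives both, hence the extra get_page().
	 */
	get_page(fe->map.buf.page);
	WRITE_ONCE(clt->pcl->compressed_pages[0], fe->map.buf.page);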

diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -35,7 +35,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 	struct super_block *const sb = inode->i_sb;
 	int err, headnr;
 	erofs_off_t pos;
-	struct page *page;
+	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 	void *kaddr;
 	struct z_erofs_map_header *h;
@@ -61,14 +61,13 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
 		    vi->xattr_isize, 8);
-	page = erofs_get_meta_page(sb, erofs_blknr(pos));
-	if (IS_ERR(page)) {
-		err = PTR_ERR(page);
+	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos),
+				   EROFS_KMAP_ATOMIC);
+	if (IS_ERR(kaddr)) {
+		err = PTR_ERR(kaddr);
 		goto out_unlock;
 	}
 
-	kaddr = kmap_atomic(page);
-
 	h = kaddr + erofs_blkoff(pos);
 	vi->z_advise = le16_to_cpu(h->h_advise);
 	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
@@ -101,20 +100,19 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 		goto unmap_done;
 	}
 unmap_done:
-	kunmap_atomic(kaddr);
-	unlock_page(page);
-	put_page(page);
+	erofs_put_metabuf(&buf);
 	if (err)
 		goto out_unlock;
 
 	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
-		struct erofs_map_blocks map = { .mpage = NULL };
+		struct erofs_map_blocks map = {
+			.buf = __EROFS_BUF_INITIALIZER
+		};
 
 		vi->z_idata_size = le16_to_cpu(h->h_idata_size);
 		err = z_erofs_do_map_blocks(inode, &map,
 					    EROFS_GET_BLOCKS_FINDTAIL);
-		if (map.mpage)
-			put_page(map.mpage);
+		erofs_put_metabuf(&map.buf);
 
 		if (!map.m_plen ||
 		    erofs_blkoff(map.m_pa) + map.m_plen > EROFS_BLKSIZ) {
@@ -151,31 +149,11 @@ static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
 				  erofs_blk_t eblk)
 {
 	struct super_block *const sb = m->inode->i_sb;
-	struct erofs_map_blocks *const map = m->map;
-	struct page *mpage = map->mpage;
 
-	if (mpage) {
-		if (mpage->index == eblk) {
-			if (!m->kaddr)
-				m->kaddr = kmap_atomic(mpage);
-			return 0;
-		}
-
-		if (m->kaddr) {
-			kunmap_atomic(m->kaddr);
-			m->kaddr = NULL;
-		}
-		put_page(mpage);
-	}
-
-	mpage = erofs_get_meta_page(sb, eblk);
-	if (IS_ERR(mpage)) {
-		map->mpage = NULL;
-		return PTR_ERR(mpage);
-	}
-	m->kaddr = kmap_atomic(mpage);
-	unlock_page(mpage);
-	map->mpage = mpage;
+	m->kaddr = erofs_read_metabuf(&m->map->buf, sb, eblk,
+				      EROFS_KMAP_ATOMIC);
+	if (IS_ERR(m->kaddr))
+		return PTR_ERR(m->kaddr);
 	return 0;
 }
@@ -711,8 +689,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 		map->m_flags |= EROFS_MAP_FULL_MAPPED;
 	}
 unmap_out:
-	if (m.kaddr)
-		kunmap_atomic(m.kaddr);
-
+	erofs_unmap_metabuf(&m.map->buf);
 out:
 	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
@@ -759,8 +736,7 @@ static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
 	struct erofs_map_blocks map = { .m_la = offset };
 
 	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
-	if (map.mpage)
-		put_page(map.mpage);
+	erofs_put_metabuf(&map.buf);
 	if (ret < 0)
 		return ret;