erofs: refine managed cache operations to folios

Convert erofs_try_to_free_all_cached_pages() and
z_erofs_cache_release_folio() to use folios.

In addition, erofs_page_is_managed() is moved to zdata.c and renamed
erofs_folio_is_managed().
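
A minimal sketch of the page-to-folio substitutions the patch applies (illustration only, not part of the patch): example_drop() and example_is_managed() are invented names, while folio_trylock(), folio_unlock(), folio_detach_private() and page_folio() are the existing kernel helpers being switched to.

/*
 * Illustrative sketch only: page-based lock/private helpers are replaced
 * by their folio counterparts, and call sites that still hold a struct
 * page obtain the folio via page_folio().
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

static void example_drop(struct folio *folio)
{
	if (!folio_trylock(folio))	/* was: trylock_page(page) */
		return;
	folio_detach_private(folio);	/* was: detach_page_private(page) */
	folio_unlock(folio);		/* was: unlock_page(page) */
}

/* Call sites that still hold a struct page wrap it with page_folio(): */
static bool example_is_managed(struct address_space *managed_mapping,
			       struct page *page)
{
	return page_folio(page)->mapping == managed_mapping;
}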

Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240305091448.1384242-6-hsiangkao@linux.alibaba.com
Author: Gao Xiang <hsiangkao@linux.alibaba.com>
Date:   2024-03-05 17:14:48 +08:00
Commit: 706fd68fce (parent: 9266f2dc5e)
6 changed files with 34 additions and 48 deletions


@@ -81,13 +81,6 @@ static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
 	return true;
 }
 
-#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
-static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
-					 struct page *page)
-{
-	return page->mapping == MNGD_MAPPING(sbi);
-}
-
 int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
 			 unsigned int padbufsize);
 extern const struct z_erofs_decompressor erofs_decompressors[];


@@ -212,9 +212,6 @@ again:
 			if (rq->out[no] != rq->in[j])
 				continue;
 
-			DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
-							rq->in[j]));
-
 			tmppage = erofs_allocpage(pgpl, rq->gfp);
 			if (!tmppage) {
 				err = -ENOMEM;


@@ -258,9 +258,6 @@ again:
 			if (rq->out[no] != rq->in[j])
 				continue;
 
-			DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
-							rq->in[j]));
-
 			tmppage = erofs_allocpage(pgpl, rq->gfp);
 			if (!tmppage) {
 				err = -ENOMEM;


@@ -467,8 +467,8 @@ int __init erofs_init_shrinker(void);
 void erofs_exit_shrinker(void);
 int __init z_erofs_init_zip_subsystem(void);
 void z_erofs_exit_zip_subsystem(void);
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
-				       struct erofs_workgroup *egrp);
+int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
+					struct erofs_workgroup *egrp);
 int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
 			    int flags);
 void *erofs_get_pcpubuf(unsigned int requiredpages);


@@ -129,7 +129,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 	 * the XArray. Otherwise some cached pages could be still attached to
 	 * the orphan old workgroup when the new one is available in the tree.
 	 */
-	if (erofs_try_to_free_all_cached_pages(sbi, grp))
+	if (erofs_try_to_free_all_cached_folios(sbi, grp))
 		goto out;
 
 	/*


@@ -119,6 +119,12 @@ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
 	return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
 }
 
+#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
+static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
+{
+	return fo->mapping == MNGD_MAPPING(sbi);
+}
+
 /*
  * bit 30: I/O error occurred on this folio
  * bit 0 - 29: remaining parts to complete this folio
@@ -611,9 +617,9 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 	fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
 }
 
-/* called by erofs_shrinker to get rid of all compressed_pages */
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
-				       struct erofs_workgroup *grp)
+/* called by erofs_shrinker to get rid of all cached compressed bvecs */
+int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
+					struct erofs_workgroup *grp)
 {
 	struct z_erofs_pcluster *const pcl =
 		container_of(grp, struct z_erofs_pcluster, obj);
@@ -621,27 +627,22 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 	int i;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
-	/*
-	 * refcount of workgroup is now freezed as 0,
-	 * therefore no need to worry about available decompression users.
-	 */
+	/* There is no active user since the pcluster is now frozen */
 	for (i = 0; i < pclusterpages; ++i) {
-		struct page *page = pcl->compressed_bvecs[i].page;
+		struct folio *folio = pcl->compressed_bvecs[i].folio;
 
-		if (!page)
+		if (!folio)
 			continue;
 
-		/* block other users from reclaiming or migrating the page */
-		if (!trylock_page(page))
+		/* Avoid reclaiming or migrating this folio */
+		if (!folio_trylock(folio))
 			return -EBUSY;
 
-		if (!erofs_page_is_managed(sbi, page))
+		if (!erofs_folio_is_managed(sbi, folio))
 			continue;
-
-		/* barrier is implied in the following 'unlock_page' */
-		WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
-		detach_page_private(page);
-		unlock_page(page);
+		pcl->compressed_bvecs[i].folio = NULL;
+		folio_detach_private(folio);
+		folio_unlock(folio);
 	}
 	return 0;
 }
@@ -658,20 +659,17 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 
 	ret = false;
 	spin_lock(&pcl->obj.lockref.lock);
-	if (pcl->obj.lockref.count > 0)
-		goto out;
-
-	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
-	for (i = 0; i < pclusterpages; ++i) {
-		if (pcl->compressed_bvecs[i].page == &folio->page) {
-			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
-			ret = true;
-			break;
+	if (pcl->obj.lockref.count <= 0) {
+		DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
+		for (i = 0; i < pclusterpages; ++i) {
+			if (pcl->compressed_bvecs[i].folio == folio) {
+				pcl->compressed_bvecs[i].folio = NULL;
+				folio_detach_private(folio);
+				ret = true;
+				break;
+			}
 		}
 	}
-	if (ret)
-		folio_detach_private(folio);
-out:
 	spin_unlock(&pcl->obj.lockref.lock);
 	return ret;
 }
@@ -1201,7 +1199,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
 		be->compressed_pages[i] = page;
 
 		if (z_erofs_is_inline_pcluster(pcl) ||
-		    erofs_page_is_managed(EROFS_SB(be->sb), page)) {
+		    erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) {
 			if (!PageUptodate(page))
 				err = -EIO;
 			continue;
@@ -1286,7 +1284,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 		/* consider shortlived pages added when decompressing */
 		page = be->compressed_pages[i];
 
-		if (!page || erofs_page_is_managed(sbi, page))
+		if (!page ||
+		    erofs_folio_is_managed(sbi, page_folio(page)))
 			continue;
 		(void)z_erofs_put_shortlivedpage(be->pagepool, page);
 		WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
@@ -1573,7 +1572,7 @@ static void z_erofs_submissionqueue_endio(struct bio *bio)
 
 		DBG_BUGON(folio_test_uptodate(folio));
 		DBG_BUGON(z_erofs_page_is_invalidated(&folio->page));
-		if (!erofs_page_is_managed(EROFS_SB(q->sb), &folio->page))
+		if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio))
 			continue;
 
 		if (!err)