erofs: record the longest decompressed size in this round

Currently, `pcl->length' records the longest decompressed length
as long as the pcluster itself isn't reclaimed.  However, such a
number is unneeded in the general case since it doesn't indicate
the exact decompressed size in this round.

Instead, let's record the decompressed size for this round, so
that `pcl->nr_pages' can be completely dropped and pageofs_out is
also designed to be kept in sync with `pcl->length'.

Acked-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220715154203.48093-16-hsiangkao@linux.alibaba.com
This commit is contained in:
Gao Xiang 2022-07-15 23:42:02 +08:00
parent 3fe96ee0f9
commit 2bfab9c0ed
2 changed files with 31 additions and 58 deletions

View file

@ -482,7 +482,6 @@ static int z_erofs_lookup_pcluster(struct z_erofs_decompress_frontend *fe)
{ {
struct erofs_map_blocks *map = &fe->map; struct erofs_map_blocks *map = &fe->map;
struct z_erofs_pcluster *pcl = fe->pcl; struct z_erofs_pcluster *pcl = fe->pcl;
unsigned int length;
/* to avoid unexpected loop formed by corrupted images */ /* to avoid unexpected loop formed by corrupted images */
if (fe->owned_head == &pcl->next || pcl == fe->tailpcl) { if (fe->owned_head == &pcl->next || pcl == fe->tailpcl) {
@ -495,24 +494,6 @@ static int z_erofs_lookup_pcluster(struct z_erofs_decompress_frontend *fe)
return -EFSCORRUPTED; return -EFSCORRUPTED;
} }
length = READ_ONCE(pcl->length);
if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
} else {
unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
if (map->m_flags & EROFS_MAP_FULL_MAPPED)
llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;
while (llen > length &&
length != cmpxchg_relaxed(&pcl->length, length, llen)) {
cpu_relax();
length = READ_ONCE(pcl->length);
}
}
mutex_lock(&pcl->lock); mutex_lock(&pcl->lock);
/* used to check tail merging loop due to corrupted images */ /* used to check tail merging loop due to corrupted images */
if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL) if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
@ -543,9 +524,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
atomic_set(&pcl->obj.refcount, 1); atomic_set(&pcl->obj.refcount, 1);
pcl->algorithmformat = map->m_algorithmformat; pcl->algorithmformat = map->m_algorithmformat;
pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) | pcl->length = 0;
(map->m_flags & EROFS_MAP_FULL_MAPPED ? pcl->partial = true;
Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
/* new pclusters should be claimed as type 1, primary and followed */ /* new pclusters should be claimed as type 1, primary and followed */
pcl->next = fe->owned_head; pcl->next = fe->owned_head;
@ -703,7 +683,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
bool tight = true, exclusive; bool tight = true, exclusive;
enum z_erofs_cache_alloctype cache_strategy; enum z_erofs_cache_alloctype cache_strategy;
unsigned int cur, end, spiltted, index; unsigned int cur, end, spiltted;
int err = 0; int err = 0;
/* register locked file pages as online pages in pack */ /* register locked file pages as online pages in pack */
@ -806,12 +786,17 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
/* bump up the number of spiltted parts of a page */ /* bump up the number of spiltted parts of a page */
++spiltted; ++spiltted;
/* also update nr_pages */ if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
index = page->index - (map->m_la >> PAGE_SHIFT); fe->pcl->length == map->m_llen)
fe->pcl->nr_pages = max_t(pgoff_t, fe->pcl->nr_pages, index + 1); fe->pcl->partial = false;
if (fe->pcl->length < offset + end - map->m_la) {
fe->pcl->length = offset + end - map->m_la;
fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
}
next_part: next_part:
/* can be used for verification */ /* shorten the remaining extent to update progress */
map->m_llen = offset + cur - map->m_la; map->m_llen = offset + cur - map->m_la;
map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
end = cur; end = cur;
if (end > 0) if (end > 0)
@ -858,7 +843,7 @@ struct z_erofs_decompress_backend {
struct page **compressed_pages; struct page **compressed_pages;
struct page **pagepool; struct page **pagepool;
unsigned int onstack_used; unsigned int onstack_used, nr_pages;
}; };
static int z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, static int z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
@ -867,7 +852,7 @@ static int z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
unsigned int pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT; unsigned int pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
struct page *oldpage; struct page *oldpage;
DBG_BUGON(pgnr >= be->pcl->nr_pages); DBG_BUGON(pgnr >= be->nr_pages);
oldpage = be->decompressed_pages[pgnr]; oldpage = be->decompressed_pages[pgnr];
be->decompressed_pages[pgnr] = bvec->page; be->decompressed_pages[pgnr] = bvec->page;
@ -955,24 +940,23 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
struct erofs_sb_info *const sbi = EROFS_SB(be->sb); struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
struct z_erofs_pcluster *pcl = be->pcl; struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl); unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
unsigned int i, inputsize, outputsize, llen, nr_pages; unsigned int i, inputsize;
struct page *page;
int err2; int err2;
bool overlapped, partial; struct page *page;
bool overlapped;
DBG_BUGON(!READ_ONCE(pcl->nr_pages));
mutex_lock(&pcl->lock); mutex_lock(&pcl->lock);
nr_pages = pcl->nr_pages; be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
/* allocate (de)compressed page arrays if cannot be kept on stack */ /* allocate (de)compressed page arrays if cannot be kept on stack */
be->decompressed_pages = NULL; be->decompressed_pages = NULL;
be->compressed_pages = NULL; be->compressed_pages = NULL;
be->onstack_used = 0; be->onstack_used = 0;
if (nr_pages <= Z_EROFS_ONSTACK_PAGES) { if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
be->decompressed_pages = be->onstack_pages; be->decompressed_pages = be->onstack_pages;
be->onstack_used = nr_pages; be->onstack_used = be->nr_pages;
memset(be->decompressed_pages, 0, memset(be->decompressed_pages, 0,
sizeof(struct page *) * nr_pages); sizeof(struct page *) * be->nr_pages);
} }
if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES) if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
@ -980,7 +964,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
if (!be->decompressed_pages) if (!be->decompressed_pages)
be->decompressed_pages = be->decompressed_pages =
kvcalloc(nr_pages, sizeof(struct page *), kvcalloc(be->nr_pages, sizeof(struct page *),
GFP_KERNEL | __GFP_NOFAIL); GFP_KERNEL | __GFP_NOFAIL);
if (!be->compressed_pages) if (!be->compressed_pages)
be->compressed_pages = be->compressed_pages =
@ -997,15 +981,6 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
if (err) if (err)
goto out; goto out;
llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
if (nr_pages << PAGE_SHIFT >= pcl->pageofs_out + llen) {
outputsize = llen;
partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
} else {
outputsize = (nr_pages << PAGE_SHIFT) - pcl->pageofs_out;
partial = true;
}
if (z_erofs_is_inline_pcluster(pcl)) if (z_erofs_is_inline_pcluster(pcl))
inputsize = pcl->tailpacking_size; inputsize = pcl->tailpacking_size;
else else
@ -1018,10 +993,10 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
.pageofs_in = pcl->pageofs_in, .pageofs_in = pcl->pageofs_in,
.pageofs_out = pcl->pageofs_out, .pageofs_out = pcl->pageofs_out,
.inputsize = inputsize, .inputsize = inputsize,
.outputsize = outputsize, .outputsize = pcl->length,
.alg = pcl->algorithmformat, .alg = pcl->algorithmformat,
.inplace_io = overlapped, .inplace_io = overlapped,
.partial_decoding = partial .partial_decoding = pcl->partial,
}, be->pagepool); }, be->pagepool);
out: out:
@ -1046,7 +1021,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES) be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
kvfree(be->compressed_pages); kvfree(be->compressed_pages);
for (i = 0; i < nr_pages; ++i) { for (i = 0; i < be->nr_pages; ++i) {
page = be->decompressed_pages[i]; page = be->decompressed_pages[i];
if (!page) if (!page)
continue; continue;
@ -1064,7 +1039,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
if (be->decompressed_pages != be->onstack_pages) if (be->decompressed_pages != be->onstack_pages)
kvfree(be->decompressed_pages); kvfree(be->decompressed_pages);
pcl->nr_pages = 0; pcl->length = 0;
pcl->partial = true;
pcl->bvset.nextpage = NULL; pcl->bvset.nextpage = NULL;
pcl->vcnt = 0; pcl->vcnt = 0;

View file

@ -12,9 +12,6 @@
#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE) #define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_INLINE_BVECS 2 #define Z_EROFS_INLINE_BVECS 2
#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT 1
/* /*
* let's leave a type here in case of introducing * let's leave a type here in case of introducing
* another tagged pointer later. * another tagged pointer later.
@ -53,7 +50,7 @@ struct z_erofs_pcluster {
/* A: point to next chained pcluster or TAILs */ /* A: point to next chained pcluster or TAILs */
z_erofs_next_pcluster_t next; z_erofs_next_pcluster_t next;
/* A: lower limit of decompressed length and if full length or not */ /* L: the maximum decompression size of this round */
unsigned int length; unsigned int length;
/* L: total number of bvecs */ /* L: total number of bvecs */
@ -65,9 +62,6 @@ struct z_erofs_pcluster {
/* I: page offset of inline compressed data */ /* I: page offset of inline compressed data */
unsigned short pageofs_in; unsigned short pageofs_in;
/* L: maximum relative page index in bvecs */
unsigned short nr_pages;
union { union {
/* L: inline a certain number of bvec for bootstrap */ /* L: inline a certain number of bvec for bootstrap */
struct z_erofs_bvset_inline bvset; struct z_erofs_bvset_inline bvset;
@ -87,6 +81,9 @@ struct z_erofs_pcluster {
/* I: compression algorithm format */ /* I: compression algorithm format */
unsigned char algorithmformat; unsigned char algorithmformat;
/* L: whether partial decompression or not */
bool partial;
/* A: compressed bvecs (can be cached or inplaced pages) */ /* A: compressed bvecs (can be cached or inplaced pages) */
struct z_erofs_bvec compressed_bvecs[]; struct z_erofs_bvec compressed_bvecs[];
}; };