erofs: support decompress big pcluster for lz4 backend

Prior to big pcluster, there was only one compressed page, so it was
easy to map. However, when big pcluster is enabled, more work needs
to be done to handle multiple compressed pages. In detail,

 - (maptype 0) if there is only one compressed page and no inplace
   I/O needs to be copied, just map it directly as before;

 - (maptype 1) if there are multiple compressed pages and no inplace
   I/O needs to be copied, vmap such compressed pages instead;

 - (maptype 2) if inplace I/O needs to be copied, copy the compressed
   data to per-CPU buffers and decompress from there.

Another thing is how to detect whether inplace decompression is
feasible or not (it's still quite easy for non-big pclusters): apart
from the inplace margin calculation, the inplace I/O page reuse order
also needs to be considered for each compressed page. Currently, if
a compressed page is the xth compressed page, it must not be reused
as any output page in [0, nrpages_out - nrpages_in + x); otherwise a
full copy to a per-CPU buffer is triggered, as the sketch below
illustrates.
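
For illustration only (not part of the patch itself), here is a
condensed user-space model of the maptype selection including the
reuse-order check above. The struct, PAGE_SIZE, and the margin macro
are simplified stand-ins for the kernel definitions (the margin value
mirrors the upstream lz4 definition), and the 0padding input margin
is ignored for brevity:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
/* tail margin for in-place LZ4 decompression (upstream lz4 definition) */
#define LZ4_DECOMPRESS_INPLACE_MARGIN(csize) (((csize) >> 8) + 32)

struct model_req {              /* simplified stand-in for z_erofs_decompress_req */
        const void **in, **out; /* compressed / decompressed page arrays */
        unsigned int pageofs_out, inputsize, outputsize;
        bool inplace_io, partial_decoding, support_0padding;
};

/* return 0 (map head page directly), 1 (vmap) or 2 (copy to per-CPU buffer) */
static int pick_maptype(const struct model_req *rq)
{
        unsigned int nrpages_in = PAGE_ALIGN(rq->inputsize) / PAGE_SIZE;
        unsigned int oend = rq->pageofs_out + rq->outputsize;
        unsigned int nrpages_out = PAGE_ALIGN(oend) / PAGE_SIZE;
        unsigned int i, j;

        if (rq->inplace_io) {
                /* partial decoding, no 0padding, or too little tail margin */
                if (rq->partial_decoding || !rq->support_0padding ||
                    PAGE_ALIGN(oend) - oend <
                        LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
                        return 2;
                /* the xth compressed page must not also serve as any
                 * output page in [0, nrpages_out - nrpages_in + x) */
                for (i = 0; i < nrpages_in; ++i)
                        for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
                                if (rq->out[j] == rq->in[i])
                                        return 2;
        }
        return nrpages_in <= 1 ? 0 : 1;
}

int main(void)
{
        /* two compressed pages reused in place at the tail of four
         * output pages: in[0] == out[2], in[1] == out[3] */
        static char p[4][PAGE_SIZE];
        const void *out[4] = { p[0], p[1], p[2], p[3] };
        const void *in[2] = { p[2], p[3] };
        struct model_req rq = {
                .in = in, .out = out, .pageofs_out = 0,
                .inputsize = 2 * PAGE_SIZE - 64,
                .outputsize = 4 * PAGE_SIZE - 64,
                .inplace_io = true, .support_0padding = true,
        };

        printf("maptype = %d\n", pick_maptype(&rq)); /* prints 1: vmap is safe */
        return 0;
}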

Although there are some extra optimization ideas for this, I'd like
to make big pcluster work correctly first; it can obviously be
optimized further later, since none of this is tied to the on-disk
format at all.

Link: https://lore.kernel.org/r/20210407043927.10623-10-xiang@kernel.org
Acked-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
Author: Gao Xiang <hsiangkao@redhat.com>
Date:   2021-04-07 12:39:26 +08:00
commit 598162d050 (parent b86269f438)

2 changed files with 139 additions and 96 deletions

fs/erofs/decompressor.c

@@ -120,44 +120,85 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
         return kaddr ? 1 : 0;
 }
 
-static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
-                                       u8 *src, unsigned int pageofs_in)
+static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq,
+                        void *inpage, unsigned int *inputmargin, int *maptype,
+                        bool support_0padding)
 {
-        /*
-         * if in-place decompression is ongoing, those decompressed
-         * pages should be copied in order to avoid being overlapped.
-         */
-        struct page **in = rq->in;
-        u8 *const tmp = erofs_get_pcpubuf(1);
-        u8 *tmpp = tmp;
-        unsigned int inlen = rq->inputsize - pageofs_in;
-        unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);
+        unsigned int nrpages_in, nrpages_out;
+        unsigned int ofull, oend, inputsize, total, i, j;
+        struct page **in;
+        void *src, *tmp;
 
-        while (tmpp < tmp + inlen) {
-                if (!src)
-                        src = kmap_atomic(*in);
-                memcpy(tmpp, src + pageofs_in, count);
-                kunmap_atomic(src);
-                src = NULL;
-                tmpp += count;
-                pageofs_in = 0;
-                count = PAGE_SIZE;
-                ++in;
+        inputsize = rq->inputsize;
+        nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
+        oend = rq->pageofs_out + rq->outputsize;
+        ofull = PAGE_ALIGN(oend);
+        nrpages_out = ofull >> PAGE_SHIFT;
+
+        if (rq->inplace_io) {
+                if (rq->partial_decoding || !support_0padding ||
+                    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
+                        goto docopy;
+
+                for (i = 0; i < nrpages_in; ++i) {
+                        DBG_BUGON(rq->in[i] == NULL);
+                        for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
+                                if (rq->out[j] == rq->in[i])
+                                        goto docopy;
+                }
         }
-        return tmp;
+
+        if (nrpages_in <= 1) {
+                *maptype = 0;
+                return inpage;
+        }
+        kunmap_atomic(inpage);
+        might_sleep();
+        src = erofs_vm_map_ram(rq->in, nrpages_in);
+        if (!src)
+                return ERR_PTR(-ENOMEM);
+        *maptype = 1;
+        return src;
+
+docopy:
+        /* Or copy compressed data which can be overlapped to per-CPU buffer */
+        in = rq->in;
+        src = erofs_get_pcpubuf(nrpages_in);
+        if (!src) {
+                DBG_BUGON(1);
+                kunmap_atomic(inpage);
+                return ERR_PTR(-EFAULT);
+        }
+
+        tmp = src;
+        total = rq->inputsize;
+        while (total) {
+                unsigned int page_copycnt =
+                        min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
+
+                if (!inpage)
+                        inpage = kmap_atomic(*in);
+                memcpy(tmp, inpage + *inputmargin, page_copycnt);
+                kunmap_atomic(inpage);
+                inpage = NULL;
+                tmp += page_copycnt;
+                total -= page_copycnt;
+                ++in;
+                *inputmargin = 0;
+        }
+        *maptype = 2;
+        return src;
 }
 
 static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
 {
-        unsigned int inputmargin, inlen;
-        u8 *src;
-        bool copied, support_0padding;
-        int ret;
+        unsigned int inputmargin;
+        u8 *headpage, *src;
+        bool support_0padding;
+        int ret, maptype;
 
-        if (rq->inputsize > PAGE_SIZE)
-                return -EOPNOTSUPP;
-
-        src = kmap_atomic(*rq->in);
+        DBG_BUGON(*rq->in == NULL);
+        headpage = kmap_atomic(*rq->in);
         inputmargin = 0;
         support_0padding = false;
 
@@ -165,50 +206,37 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
         if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
                 support_0padding = true;
 
-                while (!src[inputmargin & ~PAGE_MASK])
+                while (!headpage[inputmargin & ~PAGE_MASK])
                         if (!(++inputmargin & ~PAGE_MASK))
                                 break;
 
                 if (inputmargin >= rq->inputsize) {
-                        kunmap_atomic(src);
+                        kunmap_atomic(headpage);
                         return -EIO;
                 }
         }
 
-        copied = false;
-        inlen = rq->inputsize - inputmargin;
-        if (rq->inplace_io) {
-                const uint oend = (rq->pageofs_out +
-                                   rq->outputsize) & ~PAGE_MASK;
-                const uint nr = PAGE_ALIGN(rq->pageofs_out +
-                                           rq->outputsize) >> PAGE_SHIFT;
-
-                if (rq->partial_decoding || !support_0padding ||
-                    rq->out[nr - 1] != rq->in[0] ||
-                    rq->inputsize - oend <
-                        LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
-                        src = generic_copy_inplace_data(rq, src, inputmargin);
-                        inputmargin = 0;
-                        copied = true;
-                }
-        }
+        rq->inputsize -= inputmargin;
+        src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype,
+                                        support_0padding);
+        if (IS_ERR(src))
+                return PTR_ERR(src);
 
         /* legacy format could compress extra data in a pcluster. */
         if (rq->partial_decoding || !support_0padding)
                 ret = LZ4_decompress_safe_partial(src + inputmargin, out,
-                                                  inlen, rq->outputsize,
-                                                  rq->outputsize);
+                                rq->inputsize, rq->outputsize, rq->outputsize);
         else
                 ret = LZ4_decompress_safe(src + inputmargin, out,
-                                          inlen, rq->outputsize);
+                                          rq->inputsize, rq->outputsize);
 
         if (ret != rq->outputsize) {
                 erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
-                          ret, inlen, inputmargin, rq->outputsize);
+                          ret, rq->inputsize, inputmargin, rq->outputsize);
                 WARN_ON(1);
                 print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
-                               16, 1, src + inputmargin, inlen, true);
+                               16, 1, src + inputmargin, rq->inputsize, true);
                 print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
                                16, 1, out, rq->outputsize, true);
 
@@ -217,10 +245,16 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
                 ret = -EIO;
         }
 
-        if (copied)
-                erofs_put_pcpubuf(src);
-        else
+        if (maptype == 0) {
                 kunmap_atomic(src);
+        } else if (maptype == 1) {
+                vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
+        } else if (maptype == 2) {
+                erofs_put_pcpubuf(src);
+        } else {
+                DBG_BUGON(1);
+                return -EFAULT;
+        }
         return ret;
 }
 
@@ -270,57 +304,51 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
         const struct z_erofs_decompressor *alg = decompressors + rq->alg;
         unsigned int dst_maptype;
         void *dst;
-        int ret, i;
+        int ret;
 
-        if (nrpages_out == 1 && !rq->inplace_io) {
-                DBG_BUGON(!*rq->out);
-                dst = kmap_atomic(*rq->out);
-                dst_maptype = 0;
-                goto dstmap_out;
-        }
+        /* two optimized fast paths only for non bigpcluster cases yet */
+        if (rq->inputsize <= PAGE_SIZE) {
+                if (nrpages_out == 1 && !rq->inplace_io) {
+                        DBG_BUGON(!*rq->out);
+                        dst = kmap_atomic(*rq->out);
+                        dst_maptype = 0;
+                        goto dstmap_out;
+                }
 
-        /*
-         * For the case of small output size (especially much less
-         * than PAGE_SIZE), memcpy the decompressed data rather than
-         * compressed data is preferred.
-         */
-        if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
-                dst = erofs_get_pcpubuf(1);
-                if (IS_ERR(dst))
-                        return PTR_ERR(dst);
+                /*
+                 * For the case of small output size (especially much less
+                 * than PAGE_SIZE), memcpy the decompressed data rather than
+                 * compressed data is preferred.
+                 */
+                if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
+                        dst = erofs_get_pcpubuf(1);
+                        if (IS_ERR(dst))
+                                return PTR_ERR(dst);
 
-                rq->inplace_io = false;
-                ret = alg->decompress(rq, dst);
-                if (!ret)
-                        copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
-                                          rq->outputsize);
+                        rq->inplace_io = false;
+                        ret = alg->decompress(rq, dst);
+                        if (!ret)
+                                copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
+                                                  rq->outputsize);
 
-                erofs_put_pcpubuf(dst);
-                return ret;
+                        erofs_put_pcpubuf(dst);
+                        return ret;
+                }
         }
 
+        /* general decoding path which can be used for all cases */
         ret = alg->prepare_destpages(rq, pagepool);
-        if (ret < 0) {
+        if (ret < 0)
                 return ret;
-        } else if (ret) {
+        if (ret) {
                 dst = page_address(*rq->out);
                 dst_maptype = 1;
                 goto dstmap_out;
         }
 
-        i = 0;
-        while (1) {
-                dst = vm_map_ram(rq->out, nrpages_out, -1);
-
-                /* retry two more times (totally 3 times) */
-                if (dst || ++i >= 3)
-                        break;
-                vm_unmap_aliases();
-        }
-
+        dst = erofs_vm_map_ram(rq->out, nrpages_out);
         if (!dst)
                 return -ENOMEM;
-
         dst_maptype = 2;
 
 dstmap_out:

fs/erofs/internal.h

@@ -402,6 +402,21 @@ int erofs_namei(struct inode *dir, struct qstr *name,
 /* dir.c */
 extern const struct file_operations erofs_dir_fops;
 
+static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
+{
+        int retried = 0;
+
+        while (1) {
+                void *p = vm_map_ram(pages, count, -1);
+
+                /* retry two more times (totally 3 times) */
+                if (p || ++retried >= 3)
+                        return p;
+                vm_unmap_aliases();
+        }
+        return NULL;
+}
+
 /* pcpubuf.c */
 void *erofs_get_pcpubuf(unsigned int requiredpages);
 void erofs_put_pcpubuf(void *ptr);