Merge tag 'erofs-for-6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs
Pull erofs updates from Gao Xiang:

 - Fix a WARNING caused by a recent FSDAX misdetection regression

 - Fix the filesystem stacking limit for file-backed mounts

 - Print more informative diagnostics on decompression errors

 - Switch the on-disk definition `erofs_fs.h` to the MIT license

 - Minor cleanups

* tag 'erofs-for-6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: switch on-disk header `erofs_fs.h` to MIT license
  erofs: get rid of raw bi_end_io() usage
  erofs: enable error reporting for z_erofs_fixup_insize()
  erofs: enable error reporting for z_erofs_stream_switch_bufs()
  erofs: improve Zstd, LZMA and DEFLATE error strings
  erofs: improve decompression error reporting
  erofs: tidy up z_erofs_lz4_handle_overlap()
  erofs: limit the level of fs stacking for file-backed mounts
  erofs: correct FSDAX detection
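The recurring change in the diff below is the error-reporting convention: the decompressor hooks stop returning a bare errno and instead return a const char *, where NULL means success, an ERR_PTR()-encoded errno reports a resource or internal failure, and any other pointer is a static string naming the corruption that was detected. The following is a minimal userspace sketch of that calling convention only, not the kernel code; demo_decompress() and demo_caller() are hypothetical names, and plain-C stand-ins replace the kernel's ERR_PTR()/IS_ERR() helpers and -EFSCORRUPTED.

/*
 * Hedged sketch of the "const char * as error reason" convention:
 *   NULL            -> success
 *   ERR_PTR(-errno) -> resource/internal error
 *   other pointer   -> static string describing detected corruption
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* hypothetical decompressor following the new convention */
static const char *demo_decompress(const void *in, size_t insize)
{
        if (!in)
                return ERR_PTR(-ENOMEM);                  /* resource failure */
        if (!insize)
                return "compressed data start not found"; /* corruption */
        return NULL;                                      /* success */
}

/* hypothetical caller mirroring how z_erofs_decompress_pcluster() reacts */
static int demo_caller(const void *in, size_t insize)
{
        const char *reason = demo_decompress(in, insize);

        if (!reason)
                return 0;
        if (IS_ERR(reason)) {
                fprintf(stderr, "failed to decompress: errno %ld\n", -PTR_ERR(reason));
                return (int)PTR_ERR(reason);
        }
        fprintf(stderr, "failed to decompress: %s\n", reason);
        return -EUCLEAN;        /* stand-in for -EFSCORRUPTED */
}

int main(void)
{
        char buf[16] = { 0 };

        return demo_caller(buf, sizeof(buf)) ? 1 : 0;
}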
@@ -23,8 +23,8 @@ struct z_erofs_decompress_req {
 struct z_erofs_decompressor {
         int (*config)(struct super_block *sb, struct erofs_super_block *dsb,
                       void *data, int size);
-        int (*decompress)(struct z_erofs_decompress_req *rq,
-                          struct page **pagepool);
+        const char *(*decompress)(struct z_erofs_decompress_req *rq,
+                                   struct page **pagepool);
         int (*init)(void);
         void (*exit)(void);
         char *name;
@@ -70,10 +70,10 @@ struct z_erofs_stream_dctx {
         bool bounced;   /* is the bounce buffer used now? */
 };
 
-int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
-                               void **src, struct page **pgpl);
-int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
-                         unsigned int padbufsize);
+const char *z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx,
+                                       void **dst, void **src, struct page **pgpl);
+const char *z_erofs_fixup_insize(struct z_erofs_decompress_req *rq,
+                                 const char *padbuf, unsigned int padbufsize);
 int __init z_erofs_init_decompressor(void);
 void z_erofs_exit_decompressor(void);
 int z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
@@ -105,44 +105,58 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
         return kaddr ? 1 : 0;
 }
 
-static void *z_erofs_lz4_handle_overlap(struct z_erofs_decompress_req *rq,
+static void *z_erofs_lz4_handle_overlap(const struct z_erofs_decompress_req *rq,
                 void *inpage, void *out, unsigned int *inputmargin,
                 int *maptype, bool may_inplace)
 {
-        unsigned int oend, omargin, total, i;
+        unsigned int oend, omargin, cnt, i;
         struct page **in;
-        void *src, *tmp;
-
-        if (rq->inplace_io) {
-                oend = rq->pageofs_out + rq->outputsize;
-                omargin = PAGE_ALIGN(oend) - oend;
-                if (rq->partial_decoding || !may_inplace ||
-                    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
-                        goto docopy;
-
+        void *src;
+        /*
+         * If in-place I/O isn't used, for example, the bounce compressed cache
+         * can hold data for incomplete read requests. Just map the compressed
+         * buffer as well and decompress directly.
+         */
+        if (!rq->inplace_io) {
+                if (rq->inpages <= 1) {
+                        *maptype = 0;
+                        return inpage;
+                }
+                kunmap_local(inpage);
+                src = erofs_vm_map_ram(rq->in, rq->inpages);
+                if (!src)
+                        return ERR_PTR(-ENOMEM);
+                *maptype = 1;
+                return src;
+        }
+        /*
+         * Then, deal with in-place I/Os. The reasons why in-place I/O is useful
+         * are: (1) It minimizes memory footprint during the I/O submission,
+         * which is useful for slow storage (including network devices and
+         * low-end HDDs/eMMCs) but with a lot inflight I/Os; (2) If in-place
+         * decompression can also be applied, it will reuse the unique buffer so
+         * that no extra CPU D-cache is polluted with temporary compressed data
+         * for extreme performance.
+         */
+        oend = rq->pageofs_out + rq->outputsize;
+        omargin = PAGE_ALIGN(oend) - oend;
+        if (!rq->partial_decoding && may_inplace &&
+            omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) {
                 for (i = 0; i < rq->inpages; ++i)
                         if (rq->out[rq->outpages - rq->inpages + i] !=
                             rq->in[i])
-                                goto docopy;
-                kunmap_local(inpage);
-                *maptype = 3;
-                return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
+                                break;
+                if (i >= rq->inpages) {
+                        kunmap_local(inpage);
+                        *maptype = 3;
+                        return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
+                }
         }
 
-        if (rq->inpages <= 1) {
-                *maptype = 0;
-                return inpage;
-        }
-        kunmap_local(inpage);
-        src = erofs_vm_map_ram(rq->in, rq->inpages);
-        if (!src)
-                return ERR_PTR(-ENOMEM);
-        *maptype = 1;
-        return src;
-
-docopy:
-        /* Or copy compressed data which can be overlapped to per-CPU buffer */
-        in = rq->in;
+        /*
+         * If in-place decompression can't be applied, copy compressed data that
+         * may potentially overlap during decompression to a per-CPU buffer.
+         */
         src = z_erofs_get_gbuf(rq->inpages);
         if (!src) {
                 DBG_BUGON(1);
@@ -150,20 +164,13 @@ docopy:
                 return ERR_PTR(-EFAULT);
         }
 
-        tmp = src;
-        total = rq->inputsize;
-        while (total) {
-                unsigned int page_copycnt =
-                        min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
-
+        for (i = 0, in = rq->in; i < rq->inputsize; i += cnt, ++in) {
+                cnt = min_t(u32, rq->inputsize - i, PAGE_SIZE - *inputmargin);
                 if (!inpage)
                         inpage = kmap_local_page(*in);
-                memcpy(tmp, inpage + *inputmargin, page_copycnt);
+                memcpy(src + i, inpage + *inputmargin, cnt);
                 kunmap_local(inpage);
                 inpage = NULL;
-                tmp += page_copycnt;
-                total -= page_copycnt;
-                ++in;
                 *inputmargin = 0;
         }
         *maptype = 2;
@@ -171,21 +178,21 @@ docopy:
 }
 
 /*
- * Get the exact inputsize with zero_padding feature.
- * - For LZ4, it should work if zero_padding feature is on (5.3+);
- * - For MicroLZMA, it'd be enabled all the time.
+ * Get the exact on-disk size of the compressed data:
+ * - For LZ4, it should apply if the zero_padding feature is on (5.3+);
+ * - For others, zero_padding is enabled all the time.
  */
-int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
-                         unsigned int padbufsize)
+const char *z_erofs_fixup_insize(struct z_erofs_decompress_req *rq,
+                                 const char *padbuf, unsigned int padbufsize)
 {
         const char *padend;
 
         padend = memchr_inv(padbuf, 0, padbufsize);
         if (!padend)
-                return -EFSCORRUPTED;
+                return "compressed data start not found";
         rq->inputsize -= padend - padbuf;
         rq->pageofs_in += padend - padbuf;
-        return 0;
+        return NULL;
 }
 
 static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst)
@@ -193,6 +200,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst
         bool support_0padding = false, may_inplace = false;
         unsigned int inputmargin;
         u8 *out, *headpage, *src;
+        const char *reason;
         int ret, maptype;
 
         DBG_BUGON(*rq->in == NULL);
@@ -201,12 +209,12 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst
         /* LZ4 decompression inplace is only safe if zero_padding is enabled */
         if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
                 support_0padding = true;
-                ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
+                reason = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
                                 min_t(unsigned int, rq->inputsize,
                                       rq->sb->s_blocksize - rq->pageofs_in));
-                if (ret) {
+                if (reason) {
                         kunmap_local(headpage);
-                        return ret;
+                        return IS_ERR(reason) ? PTR_ERR(reason) : -EFSCORRUPTED;
                 }
                 may_inplace = !((rq->pageofs_in + rq->inputsize) &
                                 (rq->sb->s_blocksize - 1));
@@ -228,8 +236,6 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst
                                           rq->inputsize, rq->outputsize);
 
         if (ret != rq->outputsize) {
-                erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
-                          ret, rq->inputsize, inputmargin, rq->outputsize);
                 if (ret >= 0)
                         memset(out + ret, 0, rq->outputsize - ret);
                 ret = -EFSCORRUPTED;
@@ -250,8 +256,8 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst
         return ret;
 }
 
-static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
-                                  struct page **pagepool)
+static const char *z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
+                                          struct page **pagepool)
 {
         unsigned int dst_maptype;
         void *dst;
@@ -266,14 +272,14 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
                 /* general decoding path which can be used for all cases */
                 ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
                 if (ret < 0)
-                        return ret;
+                        return ERR_PTR(ret);
                 if (ret > 0) {
                         dst = page_address(*rq->out);
                         dst_maptype = 1;
                 } else {
                         dst = erofs_vm_map_ram(rq->out, rq->outpages);
                         if (!dst)
-                                return -ENOMEM;
+                                return ERR_PTR(-ENOMEM);
                         dst_maptype = 2;
                 }
         }
@@ -282,11 +288,11 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
                 kunmap_local(dst);
         else if (dst_maptype == 2)
                 vm_unmap_ram(dst, rq->outpages);
-        return ret;
+        return ERR_PTR(ret);
 }
 
-static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
-                                   struct page **pagepool)
+static const char *z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
+                                           struct page **pagepool)
 {
         const unsigned int nrpages_in = rq->inpages, nrpages_out = rq->outpages;
         const unsigned int bs = rq->sb->s_blocksize;
@@ -294,7 +300,7 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
         u8 *kin;
 
         if (rq->outputsize > rq->inputsize)
-                return -EOPNOTSUPP;
+                return ERR_PTR(-EOPNOTSUPP);
         if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
                 cur = bs - (rq->pageofs_out & (bs - 1));
                 pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
@@ -334,22 +340,19 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
                 kunmap_local(kin);
         }
         DBG_BUGON(ni > nrpages_in);
-        return 0;
+        return NULL;
 }
 
-int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
-                               void **src, struct page **pgpl)
+const char *z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx,
+                                       void **dst, void **src, struct page **pgpl)
 {
         struct z_erofs_decompress_req *rq = dctx->rq;
         struct super_block *sb = rq->sb;
         struct page **pgo, *tmppage;
         unsigned int j;
 
         if (!dctx->avail_out) {
-                if (++dctx->no >= rq->outpages || !rq->outputsize) {
-                        erofs_err(sb, "insufficient space for decompressed data");
-                        return -EFSCORRUPTED;
-                }
-
+                if (++dctx->no >= rq->outpages || !rq->outputsize)
+                        return "insufficient space for decompressed data";
                 if (dctx->kout)
                         kunmap_local(dctx->kout);
@@ -360,7 +363,7 @@ int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
                         *pgo = erofs_allocpage(pgpl, rq->gfp);
                         if (!*pgo) {
                                 dctx->kout = NULL;
-                                return -ENOMEM;
+                                return ERR_PTR(-ENOMEM);
                         }
                         set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
                 }
@@ -374,10 +377,8 @@ int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
         }
 
         if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
-                if (++dctx->ni >= rq->inpages) {
-                        erofs_err(sb, "invalid compressed data");
-                        return -EFSCORRUPTED;
-                }
+                if (++dctx->ni >= rq->inpages)
+                        return "invalid compressed data";
                 if (dctx->kout) /* unlike kmap(), take care of the orders */
                         kunmap_local(dctx->kout);
                 kunmap_local(dctx->kin);
@@ -412,12 +413,12 @@ int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
                         continue;
                 tmppage = erofs_allocpage(pgpl, rq->gfp);
                 if (!tmppage)
-                        return -ENOMEM;
+                        return ERR_PTR(-ENOMEM);
                 set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
                 copy_highpage(tmppage, rq->in[j]);
                 rq->in[j] = tmppage;
         }
-        return 0;
+        return NULL;
 }
 
 const struct z_erofs_decompressor *z_erofs_decomp[] = {
@@ -9,16 +9,17 @@ static int __z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
         struct sg_table st_src, st_dst;
         struct acomp_req *req;
         struct crypto_wait wait;
+        const char *reason;
         u8 *headpage;
         int ret;
 
         headpage = kmap_local_page(*rq->in);
-        ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
+        reason = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
                         min_t(unsigned int, rq->inputsize,
                               rq->sb->s_blocksize - rq->pageofs_in));
         kunmap_local(headpage);
-        if (ret)
-                return ret;
+        if (reason)
+                return IS_ERR(reason) ? PTR_ERR(reason) : -EFSCORRUPTED;
 
         req = acomp_request_alloc(tfm);
         if (!req)
@@ -97,21 +97,22 @@ failed:
         return -ENOMEM;
 }
 
-static int __z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
-                                        struct page **pgpl)
+static const char *__z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
+                                                struct page **pgpl)
 {
         struct super_block *sb = rq->sb;
         struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
         struct z_erofs_deflate *strm;
-        int zerr, err;
+        const char *reason;
+        int zerr;
 
         /* 1. get the exact DEFLATE compressed size */
         dctx.kin = kmap_local_page(*rq->in);
-        err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+        reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
                         min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
-        if (err) {
+        if (reason) {
                 kunmap_local(dctx.kin);
-                return err;
+                return reason;
         }
 
         /* 2. get an available DEFLATE context */
@@ -129,7 +130,7 @@ again:
         /* 3. multi-call decompress */
         zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS);
         if (zerr != Z_OK) {
-                err = -EIO;
+                reason = ERR_PTR(-EINVAL);
                 goto failed_zinit;
         }
 
@@ -143,10 +144,10 @@ again:
         while (1) {
                 dctx.avail_out = strm->z.avail_out;
                 dctx.inbuf_sz = strm->z.avail_in;
-                err = z_erofs_stream_switch_bufs(&dctx,
+                reason = z_erofs_stream_switch_bufs(&dctx,
                                 (void **)&strm->z.next_out,
                                 (void **)&strm->z.next_in, pgpl);
-                if (err)
+                if (reason)
                         break;
                 strm->z.avail_out = dctx.avail_out;
                 strm->z.avail_in = dctx.inbuf_sz;
@@ -157,14 +158,14 @@ again:
                                 break;
                         if (zerr == Z_STREAM_END && !rq->outputsize)
                                 break;
-                        erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
-                                  zerr, rq->inputsize, rq->outputsize);
-                        err = -EFSCORRUPTED;
+                        reason = (zerr == Z_DATA_ERROR ?
+                                  "corrupted compressed data" :
+                                  "unexpected end of stream");
                         break;
                 }
         }
-        if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
-                err = -EIO;
+        if (zlib_inflateEnd(&strm->z) != Z_OK && !reason)
+                reason = ERR_PTR(-EIO);
         if (dctx.kout)
                 kunmap_local(dctx.kout);
 failed_zinit:
@@ -175,11 +176,11 @@ failed_zinit:
         z_erofs_deflate_head = strm;
         spin_unlock(&z_erofs_deflate_lock);
         wake_up(&z_erofs_deflate_wq);
-        return err;
+        return reason;
 }
 
-static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
-                                      struct page **pgpl)
+static const char *z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
+                                              struct page **pgpl)
 {
 #ifdef CONFIG_EROFS_FS_ZIP_ACCEL
         int err;
@@ -187,7 +188,7 @@ static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
         if (!rq->partial_decoding) {
                 err = z_erofs_crypto_decompress(rq, pgpl);
                 if (err != -EOPNOTSUPP)
-                        return err;
+                        return ERR_PTR(err);
 
         }
 #endif
@@ -146,23 +146,23 @@ again:
         return err;
 }
 
-static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
-                                   struct page **pgpl)
+static const char *z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
+                                           struct page **pgpl)
 {
         struct super_block *sb = rq->sb;
         struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
         struct xz_buf buf = {};
         struct z_erofs_lzma *strm;
         enum xz_ret xz_err;
-        int err;
+        const char *reason;
 
         /* 1. get the exact LZMA compressed size */
         dctx.kin = kmap_local_page(*rq->in);
-        err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+        reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
                         min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
-        if (err) {
+        if (reason) {
                 kunmap_local(dctx.kin);
-                return err;
+                return reason;
         }
 
         /* 2. get an available lzma context */
@@ -188,9 +188,9 @@ again:
                 dctx.avail_out = buf.out_size - buf.out_pos;
                 dctx.inbuf_sz = buf.in_size;
                 dctx.inbuf_pos = buf.in_pos;
-                err = z_erofs_stream_switch_bufs(&dctx, (void **)&buf.out,
-                                                 (void **)&buf.in, pgpl);
-                if (err)
+                reason = z_erofs_stream_switch_bufs(&dctx, (void **)&buf.out,
+                                                    (void **)&buf.in, pgpl);
+                if (reason)
                         break;
 
                 if (buf.out_size == buf.out_pos) {
@@ -207,9 +207,9 @@ again:
                 if (xz_err != XZ_OK) {
                         if (xz_err == XZ_STREAM_END && !rq->outputsize)
                                 break;
-                        erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
-                                  xz_err, rq->inputsize, rq->outputsize);
-                        err = -EFSCORRUPTED;
+                        reason = (xz_err == XZ_DATA_ERROR ?
+                                  "corrupted compressed data" :
+                                  "unexpected end of stream");
                         break;
                 }
         } while (1);
@@ -223,7 +223,7 @@ again:
         z_erofs_lzma_head = strm;
         spin_unlock(&z_erofs_lzma_lock);
         wake_up(&z_erofs_lzma_wq);
-        return err;
+        return reason;
 }
 
 const struct z_erofs_decompressor z_erofs_lzma_decomp = {
@@ -135,8 +135,8 @@ static int z_erofs_load_zstd_config(struct super_block *sb,
         return strm ? -ENOMEM : 0;
 }
 
-static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
-                                   struct page **pgpl)
+static const char *z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
+                                           struct page **pgpl)
 {
         struct super_block *sb = rq->sb;
         struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
@@ -144,15 +144,16 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
         zstd_out_buffer out_buf = { NULL, 0, 0 };
         struct z_erofs_zstd *strm;
         zstd_dstream *stream;
-        int zerr, err;
+        const char *reason;
+        int zerr;
 
         /* 1. get the exact compressed size */
         dctx.kin = kmap_local_page(*rq->in);
-        err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+        reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
                         min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
-        if (err) {
+        if (reason) {
                 kunmap_local(dctx.kin);
-                return err;
+                return reason;
         }
 
         /* 2. get an available ZSTD context */
@@ -161,7 +162,7 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
         /* 3. multi-call decompress */
         stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz);
         if (!stream) {
-                err = -EIO;
+                reason = ERR_PTR(-ENOMEM);
                 goto failed_zinit;
         }
 
@@ -174,9 +175,9 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
         do {
                 dctx.inbuf_sz = in_buf.size;
                 dctx.inbuf_pos = in_buf.pos;
-                err = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst,
+                reason = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst,
                                 (void **)&in_buf.src, pgpl);
-                if (err)
+                if (reason)
                         break;
 
                 if (out_buf.size == out_buf.pos) {
@@ -191,11 +192,8 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
                 if (zstd_is_error(zerr) ||
                     ((rq->outputsize + dctx.avail_out) && (!zerr || (zerr > 0 &&
                       !(rq->inputsize + in_buf.size - in_buf.pos))))) {
-                        erofs_err(sb, "failed to decompress in[%u] out[%u]: %s",
-                                  rq->inputsize, rq->outputsize,
-                                  zstd_is_error(zerr) ? zstd_get_error_name(zerr) :
-                                  "unexpected end of stream");
-                        err = -EFSCORRUPTED;
+                        reason = zstd_is_error(zerr) ? zstd_get_error_name(zerr) :
+                                 "unexpected end of stream";
                         break;
                 }
         } while (rq->outputsize + dctx.avail_out);
@@ -210,7 +208,7 @@ failed_zinit:
         z_erofs_zstd_head = strm;
         spin_unlock(&z_erofs_zstd_lock);
         wake_up(&z_erofs_zstd_wq);
-        return err;
+        return reason;
 }
 
 const struct z_erofs_decompressor z_erofs_zstd_decomp = {
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */
+/* SPDX-License-Identifier: MIT */
 /*
  * EROFS (Enhanced ROM File System) on-disk format definition
  *
@@ -34,13 +34,13 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
         if (rq->bio.bi_end_io) {
                 if (ret < 0 && !rq->bio.bi_status)
                         rq->bio.bi_status = errno_to_blk_status(ret);
-                rq->bio.bi_end_io(&rq->bio);
         } else {
                 bio_for_each_folio_all(fi, &rq->bio) {
                         DBG_BUGON(folio_test_uptodate(fi.folio));
                         erofs_onlinefolio_end(fi.folio, ret, false);
                 }
         }
+        bio_endio(&rq->bio);
         bio_uninit(&rq->bio);
         kfree(rq);
 }
@@ -185,7 +185,7 @@ static void erofs_fscache_bio_endio(void *priv, ssize_t transferred_or_error)
 
         if (IS_ERR_VALUE(transferred_or_error))
                 io->bio.bi_status = errno_to_blk_status(transferred_or_error);
-        io->bio.bi_end_io(&io->bio);
+        bio_endio(&io->bio);
         BUILD_BUG_ON(offsetof(struct erofs_fscache_bio, io) != 0);
         erofs_fscache_io_put(&io->io);
 }
@@ -216,7 +216,7 @@ void erofs_fscache_submit_bio(struct bio *bio)
         if (!ret)
                 return;
         bio->bi_status = errno_to_blk_status(ret);
-        bio->bi_end_io(bio);
+        bio_endio(bio);
 }
 
 static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
@@ -174,15 +174,15 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
                 if (!erofs_is_fileio_mode(sbi)) {
                         dif->dax_dev = fs_dax_get_by_bdev(file_bdev(file),
                                         &dif->dax_part_off, NULL, NULL);
-                        if (!dif->dax_dev && test_opt(&sbi->opt, DAX_ALWAYS)) {
-                                erofs_info(sb, "DAX unsupported by %s. Turning off DAX.",
-                                           dif->path);
-                                clear_opt(&sbi->opt, DAX_ALWAYS);
-                        }
                 } else if (!S_ISREG(file_inode(file)->i_mode)) {
                         fput(file);
                         return -EINVAL;
                 }
+                if (!dif->dax_dev && test_opt(&sbi->opt, DAX_ALWAYS)) {
+                        erofs_info(sb, "DAX unsupported by %s. Turning off DAX.",
+                                   dif->path);
+                        clear_opt(&sbi->opt, DAX_ALWAYS);
+                }
                 dif->file = file;
         }
 
@@ -215,13 +215,13 @@ static int erofs_scan_devices(struct super_block *sb,
                           ondisk_extradevs, sbi->devs->extra_devices);
                 return -EINVAL;
         }
-        if (!ondisk_extradevs) {
-                if (test_opt(&sbi->opt, DAX_ALWAYS) && !sbi->dif0.dax_dev) {
-                        erofs_info(sb, "DAX unsupported by block device. Turning off DAX.");
-                        clear_opt(&sbi->opt, DAX_ALWAYS);
-                }
-                return 0;
+
+        if (test_opt(&sbi->opt, DAX_ALWAYS) && !sbi->dif0.dax_dev) {
+                erofs_info(sb, "DAX unsupported by block device. Turning off DAX.");
+                clear_opt(&sbi->opt, DAX_ALWAYS);
         }
+        if (!ondisk_extradevs)
+                return 0;
 
         if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
                 sbi->devs->flatdev = true;
@@ -639,6 +639,22 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
 
         sbi->blkszbits = PAGE_SHIFT;
         if (!sb->s_bdev) {
+                /*
+                 * (File-backed mounts) EROFS claims it's safe to nest other
+                 * fs contexts (including its own) due to self-controlled RO
+                 * accesses/contexts and no side-effect changes that need to
+                 * context save & restore so it can reuse the current thread
+                 * context. However, it still needs to bump `s_stack_depth` to
+                 * avoid kernel stack overflow from nested filesystems.
+                 */
+                if (erofs_is_fileio_mode(sbi)) {
+                        sb->s_stack_depth =
+                                file_inode(sbi->dif0.file)->i_sb->s_stack_depth + 1;
+                        if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+                                erofs_err(sb, "maximum fs stacking depth exceeded");
+                                return -ENOTBLK;
+                        }
+                }
                 sb->s_blocksize = PAGE_SIZE;
                 sb->s_blocksize_bits = PAGE_SHIFT;
 
@@ -1267,12 +1267,13 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
         struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
         struct z_erofs_pcluster *pcl = be->pcl;
         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
-        const struct z_erofs_decompressor *decomp =
+        const struct z_erofs_decompressor *alg =
                         z_erofs_decomp[pcl->algorithmformat];
-        bool try_free = true;
         int i, j, jtop, err2;
         struct page *page;
         bool overlapped;
+        bool try_free = true;
+        const char *reason;
 
         mutex_lock(&pcl->lock);
         be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
@@ -1304,8 +1305,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
         err2 = z_erofs_parse_in_bvecs(be, &overlapped);
         if (err2)
                 err = err2;
-        if (!err)
-                err = decomp->decompress(&(struct z_erofs_decompress_req) {
+        if (!err) {
+                reason = alg->decompress(&(struct z_erofs_decompress_req) {
                                         .sb = be->sb,
                                         .in = be->compressed_pages,
                                         .out = be->decompressed_pages,
@@ -1322,6 +1323,18 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
                                         .gfp = pcl->besteffort ? GFP_KERNEL :
                                                 GFP_NOWAIT | __GFP_NORETRY
                                  }, be->pagepool);
+                if (IS_ERR(reason)) {
+                        erofs_err(be->sb, "failed to decompress (%s) %ld @ pa %llu size %u => %u",
+                                  alg->name, PTR_ERR(reason), pcl->pos,
+                                  pcl->pclustersize, pcl->length);
+                        err = PTR_ERR(reason);
+                } else if (unlikely(reason)) {
+                        erofs_err(be->sb, "failed to decompress (%s) %s @ pa %llu size %u => %u",
+                                  alg->name, reason, pcl->pos,
+                                  pcl->pclustersize, pcl->length);
+                        err = -EFSCORRUPTED;
+                }
+        }
 
         /* must handle all compressed pages before actual file pages */
         if (pcl->from_meta) {
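For the file-backed mount fix summarized in the pull message, the erofs_fc_fill_super() hunk above bounds how deeply images may be nested by deriving the new superblock's stack depth from the backing file's filesystem and rejecting the mount once the limit is exceeded. Below is a standalone sketch of that depth rule only; demo_sb, demo_fill_super() and DEMO_MAX_STACK_DEPTH are hypothetical plain-C stand-ins for the kernel's struct super_block, fill_super path and FILESYSTEM_MAX_STACK_DEPTH, and the -1 return stands in for -ENOTBLK.

#include <stdio.h>

#define DEMO_MAX_STACK_DEPTH 2  /* assumed stand-in for FILESYSTEM_MAX_STACK_DEPTH */

struct demo_sb {
        int s_stack_depth;      /* 0 for a filesystem on a plain block device */
};

/* child depth = backing filesystem depth + 1, bounded by the maximum */
static int demo_fill_super(struct demo_sb *sb, const struct demo_sb *backing)
{
        sb->s_stack_depth = backing->s_stack_depth + 1;
        if (sb->s_stack_depth > DEMO_MAX_STACK_DEPTH)
                return -1;      /* stand-in for -ENOTBLK */
        return 0;
}

int main(void)
{
        struct demo_sb disk = { .s_stack_depth = 0 };   /* image on a block device */
        struct demo_sb lvl1, lvl2, lvl3;

        printf("level 1: %d\n", demo_fill_super(&lvl1, &disk)); /* accepted */
        printf("level 2: %d\n", demo_fill_super(&lvl2, &lvl1)); /* accepted */
        printf("level 3: %d\n", demo_fill_super(&lvl3, &lvl2)); /* rejected */
        return 0;
}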