erofs: fix unsafe pagevec reuse of hooked pclusters
There are pclusters in runtime marked with Z_EROFS_PCLUSTER_TAIL
before actual I/O submission. Thus, the decompression chain can be
extended if the following pcluster chain hooks such tail pcluster.
As the related comment mentions, if a page is shared between a hooked
pcluster and a following FOLLOWED pcluster, it can be reused for in-place
I/O (since the I/O will be submitted anyway):
_______________________________________________________________
| tail (partial) page | head (partial) page |
|_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
However, it's by no means safe to reuse such pages as pagevec, since such
PRIMARY_HOOKED pclusters may finally move into the bypass chain without I/O
submission. It's somewhat hard to reproduce with LZ4, and I just found
it (a general protection fault) by ro_fsstressing an LZMA image for a
long time.
I'm going to actively clean up the related code together with multi-page
folio adaptation in the next few months. Let's address it directly for
easier backporting for now.
Call trace for reference:
z_erofs_decompress_pcluster+0x10a/0x8a0 [erofs]
z_erofs_decompress_queue.isra.36+0x3c/0x60 [erofs]
z_erofs_runqueue+0x5f3/0x840 [erofs]
z_erofs_readahead+0x1e8/0x320 [erofs]
read_pages+0x91/0x270
page_cache_ra_unbounded+0x18b/0x240
filemap_get_pages+0x10a/0x5f0
filemap_read+0xa9/0x330
new_sync_read+0x11b/0x1a0
vfs_read+0xf1/0x190
Link: https://lore.kernel.org/r/20211103182006.4040-1-xiang@kernel.org
Fixes: 3883a79abd
("staging: erofs: introduce VLE decompression support")
Cc: <stable@vger.kernel.org> # 4.19+
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
This commit is contained in:
parent
8bb7eca972
commit
86432a6dca
|
@ -373,8 +373,8 @@ static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
|
||||||
|
|
||||||
/* callers must be with collection lock held */
|
/* callers must be with collection lock held */
|
||||||
static int z_erofs_attach_page(struct z_erofs_collector *clt,
|
static int z_erofs_attach_page(struct z_erofs_collector *clt,
|
||||||
struct page *page,
|
struct page *page, enum z_erofs_page_type type,
|
||||||
enum z_erofs_page_type type)
|
bool pvec_safereuse)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
@ -384,9 +384,9 @@ static int z_erofs_attach_page(struct z_erofs_collector *clt,
|
||||||
z_erofs_try_inplace_io(clt, page))
|
z_erofs_try_inplace_io(clt, page))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
ret = z_erofs_pagevec_enqueue(&clt->vector, page, type);
|
ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
|
||||||
|
pvec_safereuse);
|
||||||
clt->cl->vcnt += (unsigned int)ret;
|
clt->cl->vcnt += (unsigned int)ret;
|
||||||
|
|
||||||
return ret ? 0 : -EAGAIN;
|
return ret ? 0 : -EAGAIN;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -729,7 +729,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
|
||||||
tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
|
tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
|
||||||
|
|
||||||
retry:
|
retry:
|
||||||
err = z_erofs_attach_page(clt, page, page_type);
|
err = z_erofs_attach_page(clt, page, page_type,
|
||||||
|
clt->mode >= COLLECT_PRIMARY_FOLLOWED);
|
||||||
/* should allocate an additional short-lived page for pagevec */
|
/* should allocate an additional short-lived page for pagevec */
|
||||||
if (err == -EAGAIN) {
|
if (err == -EAGAIN) {
|
||||||
struct page *const newpage =
|
struct page *const newpage =
|
||||||
|
@ -737,7 +738,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
|
||||||
|
|
||||||
set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
|
set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
|
||||||
err = z_erofs_attach_page(clt, newpage,
|
err = z_erofs_attach_page(clt, newpage,
|
||||||
Z_EROFS_PAGE_TYPE_EXCLUSIVE);
|
Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
|
||||||
if (!err)
|
if (!err)
|
||||||
goto retry;
|
goto retry;
|
||||||
}
|
}
|
||||||
|
|
|
@ -106,11 +106,18 @@ static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
|
||||||
|
|
||||||
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
|
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
|
||||||
struct page *page,
|
struct page *page,
|
||||||
enum z_erofs_page_type type)
|
enum z_erofs_page_type type,
|
||||||
|
bool pvec_safereuse)
|
||||||
{
|
{
|
||||||
if (!ctor->next && type)
|
if (!ctor->next) {
|
||||||
if (ctor->index + 1 == ctor->nr)
|
/* some pages cannot be reused as pvec safely without I/O */
|
||||||
|
if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
|
||||||
|
type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;
|
||||||
|
|
||||||
|
if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
|
||||||
|
ctor->index + 1 == ctor->nr)
|
||||||
return false;
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
if (ctor->index >= ctor->nr)
|
if (ctor->index >= ctor->nr)
|
||||||
z_erofs_pagevec_ctor_pagedown(ctor, false);
|
z_erofs_pagevec_ctor_pagedown(ctor, false);
|
||||||
|
|
Loading…
Reference in New Issue