linux_old1/fs/erofs/zpvec.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZPVEC_H
#define __EROFS_FS_ZPVEC_H

#include "tagptr.h"
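/*
 * A "pagevec" records tagged page pointers (erofs_vtptr_t, a page
 * pointer folded with a 2-bit z_erofs_page_type tag) for the
 * decompression subsystem.  The initial array is supplied by the
 * caller; whenever it fills up, an EXCLUSIVE page already recorded in
 * the vector is kmapped and reused as storage for the following array,
 * so no additional allocation is needed.
 *
 * Rough usage sketch (names follow this header; the vector size and
 * the call sequence are illustrative only):
 *
 *	struct z_erofs_pagevec_ctor ctor;
 *	erofs_vtptr_t pvec[4];	// caller-provided inline vector
 *
 *	z_erofs_pagevec_ctor_init(&ctor, 4, pvec, 0);
 *	if (!z_erofs_pagevec_enqueue(&ctor, page,
 *				     Z_EROFS_PAGE_TYPE_EXCLUSIVE, true))
 *		...;	// vector full and no safe backing page
 *	z_erofs_pagevec_ctor_exit(&ctor, false);
 */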
/* page type in pagevec for decompress subsystem */
enum z_erofs_page_type {
	/* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
	Z_EROFS_PAGE_TYPE_EXCLUSIVE,

	Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,

	Z_EROFS_VLE_PAGE_TYPE_HEAD,
	Z_EROFS_VLE_PAGE_TYPE_MAX
};
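/*
 * Never defined anywhere: any call fails at build time thanks to
 * __compiletime_error(), which z_erofs_pagevec_enqueue() uses below to
 * assert that Z_EROFS_PAGE_TYPE_EXCLUSIVE is 0.
 */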
extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
	__bad_page_type_exclusive(void);
/* pagevec tagged pointer */
typedef tagptr2_t	erofs_vtptr_t;

/* pagevec collector */
struct z_erofs_pagevec_ctor {
	struct page *curr, *next;
	erofs_vtptr_t *pages;

	/* number of slots in the current array and the next slot to use */
	unsigned int nr, index;
};
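/* unmap the array backing page, if one is currently mapped */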
static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
					     bool atomic)
{
	if (!ctor->curr)
		return;

	if (atomic)
		kunmap_atomic(ctor->pages);
	else
		kunmap(ctor->curr);
}
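/*
 * Pick the page that will back the next pagevec array: either the
 * recorded candidate (ctor->next) or the first EXCLUSIVE page among
 * the first @nr slots, which is safe to reuse as storage.
 */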
static inline struct page *
z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
			       unsigned int nr)
{
	unsigned int index;

	/* keep away from occupied pages */
	if (ctor->next)
		return ctor->next;

	for (index = 0; index < nr; ++index) {
		const erofs_vtptr_t t = ctor->pages[index];
		const unsigned int tags = tagptr_unfold_tags(t);

		if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
			return tagptr_unfold_ptr(t);
	}
	DBG_BUGON(nr >= ctor->nr);
	return NULL;
}
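/* move on to the next backing page: unmap the current array, map the new one */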
static inline void
z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
			      bool atomic)
{
	struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);

	z_erofs_pagevec_ctor_exit(ctor, atomic);

	ctor->curr = next;
	ctor->next = NULL;
	ctor->pages = atomic ?
		kmap_atomic(ctor->curr) : kmap(ctor->curr);

	ctor->nr = PAGE_SIZE / sizeof(struct page *);
	ctor->index = 0;
}
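/*
 * Set up a collector over the caller-provided vector @pages with @nr
 * slots, positioned at logical slot @i; the constructor pages down
 * through backing pages first if @i lies beyond the current array.
 */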
static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
					     unsigned int nr,
					     erofs_vtptr_t *pages,
					     unsigned int i)
{
	ctor->nr = nr;
	ctor->curr = ctor->next = NULL;
	ctor->pages = pages;

	if (i >= nr) {
		i -= nr;
		z_erofs_pagevec_ctor_pagedown(ctor, false);
		while (i > ctor->nr) {
			i -= ctor->nr;
			z_erofs_pagevec_ctor_pagedown(ctor, false);
		}
	}
	ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
	ctor->index = i;
}
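/*
 * Record @page with its @type in the vector.  As the fix for unsafe
 * pagevec reuse of hooked pclusters explains, a page shared by a
 * PRIMARY_HOOKED and a following PRIMARY_FOLLOWED pcluster may be
 * reused for in-place I/O, but not as pagevec storage unless
 * @pvec_safereuse is set, since hooked pclusters can move into the
 * bypass chain without I/O submission.  Returns false if the last
 * slot would be taken while no backing page for the next array is
 * available.
 */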
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
					   struct page *page,
					   enum z_erofs_page_type type,
					   bool pvec_safereuse)
{
	if (!ctor->next) {
		/* some pages cannot be reused as pvec safely without I/O */
		if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
			type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;

		if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
		    ctor->index + 1 == ctor->nr)
			return false;
	}

	if (ctor->index >= ctor->nr)
		z_erofs_pagevec_ctor_pagedown(ctor, false);

	/* exclusive page type must be 0 */
	if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
		__bad_page_type_exclusive();

	/*
	 * ctor->next is either NULL or a page pointer, so it can never
	 * equal the tag values 1 or 2; this matches only EXCLUSIVE (0)
	 * pages while no candidate backing page is recorded yet.
	 */
	if (type == (uintptr_t)ctor->next) {
		ctor->next = page;
	}
	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
	return true;
}
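/*
 * Pull the next tagged page out of the vector, returning its type via
 * @type and clearing the slot.  A dequeued EXCLUSIVE page becomes the
 * candidate backing page for the following array.
 */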
static inline struct page *
z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
			enum z_erofs_page_type *type)
{
	erofs_vtptr_t t;

	if (ctor->index >= ctor->nr) {
		DBG_BUGON(!ctor->next);
		z_erofs_pagevec_ctor_pagedown(ctor, true);
	}

	t = ctor->pages[ctor->index];
	*type = tagptr_unfold_tags(t);

	/* likewise, ctor->next can never equal the tag values 1 or 2 */
	if (*type == (uintptr_t)ctor->next)
		ctor->next = tagptr_unfold_ptr(t);

	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
	return tagptr_unfold_ptr(t);
}
#endif