staging: erofs: localize UNALLOCATED_CACHED_PAGE placeholder
In practice, in order to do cached decompression rather than reusing file pages for in-place decompression, and to make full use of the pages already in page_pool instead of allocating new ones whenever possible, an unallocated placeholder was introduced to mark empty slots in compressed_pages[]; the placeholders are then replaced with real pages at submission time.

Previously EROFS_UNALLOCATED_CACHED_PAGE was defined in internal.h, which is unnecessary since it is only used inside the decompression subsystem. Move it to unzip_vle.c and rename it to PAGE_UNALLOCATED.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
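For illustration only, below is a minimal user-space sketch of the placeholder pattern the message describes: empty slots in a page array are marked with a sentinel pointer so they are not reused for in-place decompression, and the sentinel is swapped for a real allocation at submission time. The slot array, SLOT_UNALLOCATED value and helper names are hypothetical stand-ins, not the EROFS code touched by this commit.

/*
 * Sketch of the sentinel-placeholder idea (assumed, illustrative names;
 * not the actual EROFS implementation).
 */
#include <stdio.h>
#include <stdlib.h>

/* sentinel, analogous in spirit to PAGE_UNALLOCATED in unzip_vle.c */
#define SLOT_UNALLOCATED	((void *)0x5F0E4B1D)
#define NR_SLOTS		4

/* "grab" phase: mark every empty slot so it cannot be reused in place */
static void mark_unallocated_slots(void *slots[], int n)
{
	int i;

	for (i = 0; i < n; ++i)
		if (!slots[i])
			slots[i] = SLOT_UNALLOCATED;
}

/* "submission" phase: swap each placeholder for a freshly allocated buffer */
static void replace_placeholders(void *slots[], int n, size_t size)
{
	int i;

	for (i = 0; i < n; ++i)
		if (slots[i] == SLOT_UNALLOCATED)
			slots[i] = malloc(size);
}

int main(void)
{
	static char cached_page[4096];	/* stands in for a page already cached */
	void *slots[NR_SLOTS] = { NULL, cached_page, NULL, NULL };
	int i;

	mark_unallocated_slots(slots, NR_SLOTS);
	replace_placeholders(slots, NR_SLOTS, 4096);

	for (i = 0; i < NR_SLOTS; ++i)
		printf("slot %d: %p\n", i, slots[i]);
	return 0;
}

In the actual code, the marking happens in grab_managed_cache_pages() and the replacement in z_erofs_vle_submit_all(), as the diff below shows.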
commit 672e547610
parent c1448fa880
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -289,8 +289,6 @@ static inline void erofs_workstation_cleanup_all(struct super_block *sb)
 }
 
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
-#define EROFS_UNALLOCATED_CACHED_PAGE	((void *)0x5F0EF00D)
-
 extern int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 	struct erofs_workgroup *egrp);
 extern int erofs_try_to_free_cached_page(struct address_space *mapping,
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -15,6 +15,12 @@
 
 #include <trace/events/erofs.h>
 
+/*
+ * a compressed_pages[] placeholder in order to avoid
+ * being filled with file pages for in-place decompression.
+ */
+#define PAGE_UNALLOCATED	((void *)0x5F0E4B1D)
+
 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
 
@@ -147,7 +153,7 @@ static bool grab_managed_cache_pages(struct address_space *mapping,
 			noio = false;
 			if (!reserve_allocation)
 				continue;
-			page = EROFS_UNALLOCATED_CACHED_PAGE;
+			page = PAGE_UNALLOCATED;
 		}
 
 		if (!cmpxchg(compressed_pages + i, NULL, page))
@@ -1180,7 +1186,7 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
 		cachemngd = false;
 
-		if (page == EROFS_UNALLOCATED_CACHED_PAGE) {
+		if (page == PAGE_UNALLOCATED) {
 			cachemngd = true;
 			goto do_allocpage;
 		} else if (page) {