erofs: get rid of ->lru usage

Currently, ->lru is a way to arrange non-LRU pages and has some
in-kernel users. In order to minimize noticeable issues of page
reclaim and cache thrashing under high memory pressure, limited
temporary pages were all chained with ->lru and can be reused
during the request. However, it seems that ->lru could be removed
once the folio conversion lands.

Let's use page->private to chain temporary pages for now instead
and transform EROFS formally after the topic of the folio / file
page design is finalized.

Link: https://lore.kernel.org/r/20211022090120.14675-1-hsiangkao@linux.alibaba.com
Cc: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Kent Overstreet <kent.overstreet@gmail.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
This commit is contained in:
Gao Xiang 2021-10-22 17:01:20 +08:00
parent 622ceaddb7
commit eaa9172ad9
7 changed files with 53 additions and 44 deletions

View File

@ -22,7 +22,7 @@ struct z_erofs_decompress_req {
struct z_erofs_decompressor { struct z_erofs_decompressor {
int (*decompress)(struct z_erofs_decompress_req *rq, int (*decompress)(struct z_erofs_decompress_req *rq,
struct list_head *pagepool); struct page **pagepool);
char *name; char *name;
}; };
@ -64,7 +64,7 @@ static inline bool z_erofs_is_shortlived_page(struct page *page)
return true; return true;
} }
static inline bool z_erofs_put_shortlivedpage(struct list_head *pagepool, static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
struct page *page) struct page *page)
{ {
if (!z_erofs_is_shortlived_page(page)) if (!z_erofs_is_shortlived_page(page))
@ -75,8 +75,7 @@ static inline bool z_erofs_put_shortlivedpage(struct list_head *pagepool,
put_page(page); put_page(page);
} else { } else {
/* follow the pcluster rule above. */ /* follow the pcluster rule above. */
set_page_private(page, 0); erofs_pagepool_add(pagepool, page);
list_add(&page->lru, pagepool);
} }
return true; return true;
} }
@ -89,9 +88,9 @@ static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
} }
int z_erofs_decompress(struct z_erofs_decompress_req *rq, int z_erofs_decompress(struct z_erofs_decompress_req *rq,
struct list_head *pagepool); struct page **pagepool);
/* prototypes for specific algorithms */ /* prototypes for specific algorithms */
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
struct list_head *pagepool); struct page **pagepool);
#endif #endif

View File

@ -57,7 +57,7 @@ int z_erofs_load_lz4_config(struct super_block *sb,
* all physical pages are consecutive, which can be seen for moderate CR. * all physical pages are consecutive, which can be seen for moderate CR.
*/ */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq, static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
struct list_head *pagepool) struct page **pagepool)
{ {
const unsigned int nr = const unsigned int nr =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@ -254,7 +254,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
} }
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
struct list_head *pagepool) struct page **pagepool)
{ {
const unsigned int nrpages_out = const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@ -296,7 +296,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
} }
static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq, static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
struct list_head *pagepool) struct page **pagepool)
{ {
const unsigned int nrpages_out = const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@ -352,7 +352,7 @@ static struct z_erofs_decompressor decompressors[] = {
}; };
int z_erofs_decompress(struct z_erofs_decompress_req *rq, int z_erofs_decompress(struct z_erofs_decompress_req *rq,
struct list_head *pagepool) struct page **pagepool)
{ {
return decompressors[rq->alg].decompress(rq, pagepool); return decompressors[rq->alg].decompress(rq, pagepool);
} }

View File

@ -150,7 +150,7 @@ int z_erofs_load_lzma_config(struct super_block *sb,
} }
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
struct list_head *pagepool) struct page **pagepool)
{ {
const unsigned int nrpages_out = const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;

View File

@ -499,7 +499,14 @@ void erofs_pcpubuf_init(void);
void erofs_pcpubuf_exit(void); void erofs_pcpubuf_exit(void);
/* utils.c / zdata.c */ /* utils.c / zdata.c */
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp); struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
/*
 * Push @page onto the head of the pagepool, a singly-linked chain of
 * spare pages threaded through page->private (replacing the previous
 * ->lru-based list so that struct page's ->lru field is no longer used).
 *
 * NOTE(review): callers must own the sole reference to @page and its
 * ->private field must be free for reuse — presumably guaranteed by the
 * shortlived-page rules in this series; confirm against callers.
 */
static inline void erofs_pagepool_add(struct page **pagepool,
struct page *page)
{
/* link the current head behind @page, then make @page the new head */
set_page_private(page, (unsigned long)*pagepool);
*pagepool = page;
}
void erofs_release_pages(struct page **pagepool);
#ifdef CONFIG_EROFS_FS_ZIP #ifdef CONFIG_EROFS_FS_ZIP
int erofs_workgroup_put(struct erofs_workgroup *grp); int erofs_workgroup_put(struct erofs_workgroup *grp);

View File

@ -49,7 +49,7 @@ int erofs_pcpubuf_growsize(unsigned int nrpages)
{ {
static DEFINE_MUTEX(pcb_resize_mutex); static DEFINE_MUTEX(pcb_resize_mutex);
static unsigned int pcb_nrpages; static unsigned int pcb_nrpages;
LIST_HEAD(pagepool); struct page *pagepool = NULL;
int delta, cpu, ret, i; int delta, cpu, ret, i;
mutex_lock(&pcb_resize_mutex); mutex_lock(&pcb_resize_mutex);
@ -102,13 +102,13 @@ int erofs_pcpubuf_growsize(unsigned int nrpages)
vunmap(old_ptr); vunmap(old_ptr);
free_pagearray: free_pagearray:
while (i) while (i)
list_add(&oldpages[--i]->lru, &pagepool); erofs_pagepool_add(&pagepool, oldpages[--i]);
kfree(oldpages); kfree(oldpages);
if (ret) if (ret)
break; break;
} }
pcb_nrpages = nrpages; pcb_nrpages = nrpages;
put_pages_list(&pagepool); erofs_release_pages(&pagepool);
out: out:
mutex_unlock(&pcb_resize_mutex); mutex_unlock(&pcb_resize_mutex);
return ret; return ret;

View File

@ -6,20 +6,29 @@
#include "internal.h" #include "internal.h"
#include <linux/pagevec.h> #include <linux/pagevec.h>
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp) struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{ {
struct page *page; struct page *page = *pagepool;
if (!list_empty(pool)) { if (page) {
page = lru_to_page(pool);
DBG_BUGON(page_ref_count(page) != 1); DBG_BUGON(page_ref_count(page) != 1);
list_del(&page->lru); *pagepool = (struct page *)page_private(page);
} else { } else {
page = alloc_page(gfp); page = alloc_page(gfp);
} }
return page; return page;
} }
/*
 * Drain the pagepool: walk the page->private-linked chain built by
 * erofs_pagepool_add(), dropping one page reference per page via
 * put_page().  On return *pagepool is NULL (the chain is empty).
 */
void erofs_release_pages(struct page **pagepool)
{
while (*pagepool) {
struct page *page = *pagepool;

/* advance the head to the next link before releasing this page */
*pagepool = (struct page *)page_private(page);
put_page(page);
}
}
#ifdef CONFIG_EROFS_FS_ZIP #ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */ /* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt; static atomic_long_t erofs_global_shrink_cnt;

View File

@ -236,7 +236,7 @@ static DEFINE_MUTEX(z_pagemap_global_lock);
static void preload_compressed_pages(struct z_erofs_collector *clt, static void preload_compressed_pages(struct z_erofs_collector *clt,
struct address_space *mc, struct address_space *mc,
enum z_erofs_cache_alloctype type, enum z_erofs_cache_alloctype type,
struct list_head *pagepool) struct page **pagepool)
{ {
struct z_erofs_pcluster *pcl = clt->pcl; struct z_erofs_pcluster *pcl = clt->pcl;
bool standalone = true; bool standalone = true;
@ -287,12 +287,10 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t))) if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
continue; continue;
if (page) { if (page)
put_page(page); put_page(page);
} else if (newpage) { else if (newpage)
set_page_private(newpage, 0); erofs_pagepool_add(pagepool, newpage);
list_add(&newpage->lru, pagepool);
}
} }
/* /*
@ -643,7 +641,7 @@ static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
} }
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct page *page, struct list_head *pagepool) struct page *page, struct page **pagepool)
{ {
struct inode *const inode = fe->inode; struct inode *const inode = fe->inode;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
@ -836,7 +834,7 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
static int z_erofs_decompress_pcluster(struct super_block *sb, static int z_erofs_decompress_pcluster(struct super_block *sb,
struct z_erofs_pcluster *pcl, struct z_erofs_pcluster *pcl,
struct list_head *pagepool) struct page **pagepool)
{ {
struct erofs_sb_info *const sbi = EROFS_SB(sb); struct erofs_sb_info *const sbi = EROFS_SB(sb);
struct z_erofs_pagevec_ctor ctor; struct z_erofs_pagevec_ctor ctor;
@ -1036,7 +1034,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
} }
static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
struct list_head *pagepool) struct page **pagepool)
{ {
z_erofs_next_pcluster_t owned = io->head; z_erofs_next_pcluster_t owned = io->head;
@ -1060,18 +1058,18 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
{ {
struct z_erofs_decompressqueue *bgq = struct z_erofs_decompressqueue *bgq =
container_of(work, struct z_erofs_decompressqueue, u.work); container_of(work, struct z_erofs_decompressqueue, u.work);
LIST_HEAD(pagepool); struct page *pagepool = NULL;
DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED); DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
z_erofs_decompress_queue(bgq, &pagepool); z_erofs_decompress_queue(bgq, &pagepool);
put_pages_list(&pagepool); erofs_release_pages(&pagepool);
kvfree(bgq); kvfree(bgq);
} }
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
unsigned int nr, unsigned int nr,
struct list_head *pagepool, struct page **pagepool,
struct address_space *mc, struct address_space *mc,
gfp_t gfp) gfp_t gfp)
{ {
@ -1173,7 +1171,7 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
out_allocpage: out_allocpage:
page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL); page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
list_add(&page->lru, pagepool); erofs_pagepool_add(pagepool, page);
cond_resched(); cond_resched();
goto repeat; goto repeat;
} }
@ -1257,7 +1255,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
static void z_erofs_submit_queue(struct super_block *sb, static void z_erofs_submit_queue(struct super_block *sb,
struct z_erofs_decompress_frontend *f, struct z_erofs_decompress_frontend *f,
struct list_head *pagepool, struct page **pagepool,
struct z_erofs_decompressqueue *fgq, struct z_erofs_decompressqueue *fgq,
bool *force_fg) bool *force_fg)
{ {
@ -1365,7 +1363,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
static void z_erofs_runqueue(struct super_block *sb, static void z_erofs_runqueue(struct super_block *sb,
struct z_erofs_decompress_frontend *f, struct z_erofs_decompress_frontend *f,
struct list_head *pagepool, bool force_fg) struct page **pagepool, bool force_fg)
{ {
struct z_erofs_decompressqueue io[NR_JOBQUEUES]; struct z_erofs_decompressqueue io[NR_JOBQUEUES];
@ -1394,7 +1392,7 @@ static void z_erofs_runqueue(struct super_block *sb,
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
struct readahead_control *rac, struct readahead_control *rac,
erofs_off_t end, erofs_off_t end,
struct list_head *pagepool, struct page **pagepool,
bool backmost) bool backmost)
{ {
struct inode *inode = f->inode; struct inode *inode = f->inode;
@ -1457,8 +1455,8 @@ static int z_erofs_readpage(struct file *file, struct page *page)
{ {
struct inode *const inode = page->mapping->host; struct inode *const inode = page->mapping->host;
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
struct page *pagepool = NULL;
int err; int err;
LIST_HEAD(pagepool);
trace_erofs_readpage(page, false); trace_erofs_readpage(page, false);
f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT; f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
@ -1479,8 +1477,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
if (f.map.mpage) if (f.map.mpage)
put_page(f.map.mpage); put_page(f.map.mpage);
/* clean up the remaining free pages */ erofs_release_pages(&pagepool);
put_pages_list(&pagepool);
return err; return err;
} }
@ -1489,9 +1486,8 @@ static void z_erofs_readahead(struct readahead_control *rac)
struct inode *const inode = rac->mapping->host; struct inode *const inode = rac->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
struct page *page, *head = NULL; struct page *pagepool = NULL, *head = NULL, *page;
unsigned int nr_pages; unsigned int nr_pages;
LIST_HEAD(pagepool);
f.readahead = true; f.readahead = true;
f.headoffset = readahead_pos(rac); f.headoffset = readahead_pos(rac);
@ -1528,9 +1524,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
nr_pages <= sbi->opt.max_sync_decompress_pages); nr_pages <= sbi->opt.max_sync_decompress_pages);
if (f.map.mpage) if (f.map.mpage)
put_page(f.map.mpage); put_page(f.map.mpage);
erofs_release_pages(&pagepool);
/* clean up the remaining free pages */
put_pages_list(&pagepool);
} }
const struct address_space_operations z_erofs_aops = { const struct address_space_operations z_erofs_aops = {