mm/lru: Add folio_add_lru()
Reimplement lru_cache_add() as a wrapper around folio_add_lru(). Saves
159 bytes of kernel text due to removing calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
commit 0d31125d2d
parent 934387c99f
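The text saving is mechanical: page-based helpers such as PageLRU() and get_page() each call compound_head() to find the head page, while the folio variants already start from the head. A caller that has a folio in hand can now skip the compat path entirely. A minimal illustrative sketch (the function below is invented for this note, not part of the commit):

	/*
	 * Illustration only: queueing a folio for the LRU before and
	 * after this commit.
	 */
	static void example_queue_folio(struct folio *folio)
	{
		/*
		 * Before: only a page-based entry point existed, so a
		 * folio caller went through the page, and lru_cache_add()
		 * re-derived the head page internally:
		 *
		 *	lru_cache_add(&folio->page);
		 *
		 * After: the folio is queued directly, with no
		 * page <-> folio round trip and no compound_head() calls.
		 */
		folio_add_lru(folio);
	}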
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -351,6 +351,7 @@ extern unsigned long nr_free_buffer_pages(void);
 extern void lru_note_cost(struct lruvec *lruvec, bool file,
			  unsigned int nr_pages);
 extern void lru_note_cost_folio(struct folio *);
+extern void folio_add_lru(struct folio *);
 extern void lru_cache_add(struct page *);
 void mark_page_accessed(struct page *);
 void folio_mark_accessed(struct folio *);
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -102,3 +102,9 @@ bool redirty_page_for_writepage(struct writeback_control *wbc,
 	return folio_redirty_for_writepage(wbc, page_folio(page));
 }
 EXPORT_SYMBOL(redirty_page_for_writepage);
+
+void lru_cache_add(struct page *page)
+{
+	folio_add_lru(page_folio(page));
+}
+EXPORT_SYMBOL(lru_cache_add);
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -437,29 +437,29 @@ void folio_mark_accessed(struct folio *folio)
 EXPORT_SYMBOL(folio_mark_accessed);
 
 /**
- * lru_cache_add - add a page to a page list
- * @page: the page to be added to the LRU.
+ * folio_add_lru - Add a folio to an LRU list.
+ * @folio: The folio to be added to the LRU.
  *
- * Queue the page for addition to the LRU via pagevec. The decision on whether
+ * Queue the folio for addition to the LRU. The decision on whether
  * to add the page to the [in]active [file|anon] list is deferred until the
- * pagevec is drained. This gives a chance for the caller of lru_cache_add()
- * have the page added to the active list using mark_page_accessed().
+ * pagevec is drained. This gives a chance for the caller of folio_add_lru()
+ * have the folio added to the active list using folio_mark_accessed().
  */
-void lru_cache_add(struct page *page)
+void folio_add_lru(struct folio *folio)
 {
 	struct pagevec *pvec;
 
-	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
-	VM_BUG_ON_PAGE(PageLRU(page), page);
+	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
+	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
-	get_page(page);
+	folio_get(folio);
 	local_lock(&lru_pvecs.lock);
 	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
-	if (pagevec_add_and_need_flush(pvec, page))
+	if (pagevec_add_and_need_flush(pvec, &folio->page))
 		__pagevec_lru_add(pvec);
 	local_unlock(&lru_pvecs.lock);
 }
-EXPORT_SYMBOL(lru_cache_add);
+EXPORT_SYMBOL(folio_add_lru);
 
 /**
  * lru_cache_add_inactive_or_unevictable
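The kernel-doc above describes a deliberate window: the folio sits in a per-CPU pagevec until the pagevec drains, so the caller can still influence which LRU list it ends up on. A hedged sketch of the pattern the comment names (the surrounding function is invented for illustration):

	/* Illustration only: steer a newly queued folio to the active list. */
	static void example_add_active(struct folio *folio)
	{
		folio_add_lru(folio);		/* parked in the per-CPU lru_add pagevec */
		folio_mark_accessed(folio);	/* recorded before the drain picks a list */
	}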