mm/filemap: Add folio_lock()

This is like lock_page() but for use by callers who know they have a folio.
Convert __lock_page() to be __folio_lock().  This saves one call to
compound_head() per contended call to lock_page().

Saves 455 bytes of text, mostly from improved register allocation and
inlining decisions.  __folio_lock is 59 bytes while __lock_page was 79.
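
To illustrate the new calling convention (this snippet is not part of the
patch, and the helper is hypothetical), a caller that already has a folio
can take and release the lock directly, skipping the page_folio() /
compound_head() step that lock_page() still performs for legacy callers:

/* Hypothetical caller, for illustration only. */
static void example_with_folio(struct folio *folio)
{
	folio_lock(folio);	/* may sleep; calls __folio_lock() if contended */
	/* ... operate on the locked folio ... */
	folio_unlock(folio);
}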

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
commit 7c23c782d5
parent 4e1364286d
Author: Matthew Wilcox (Oracle)
Date:   2021-03-01 19:38:25 -05:00
2 changed files with 34 additions and 19 deletions

include/linux/pagemap.h

@@ -652,7 +652,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 	return true;
 }
 
-extern void __lock_page(struct page *page);
+void __folio_lock(struct folio *folio);
 extern int __lock_page_killable(struct page *page);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
@@ -660,13 +660,24 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 void unlock_page(struct page *page);
 void folio_unlock(struct folio *folio);
 
+static inline bool folio_trylock(struct folio *folio)
+{
+	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
+}
+
 /*
  * Return true if the page was successfully locked
  */
 static inline int trylock_page(struct page *page)
 {
-	page = compound_head(page);
-	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+	return folio_trylock(page_folio(page));
+}
+
+static inline void folio_lock(struct folio *folio)
+{
+	might_sleep();
+	if (!folio_trylock(folio))
+		__folio_lock(folio);
 }
 
 /*
@@ -674,9 +685,12 @@ static inline int trylock_page(struct page *page)
  */
 static inline void lock_page(struct page *page)
 {
+	struct folio *folio;
 	might_sleep();
-	if (!trylock_page(page))
-		__lock_page(page);
+
+	folio = page_folio(page);
+	if (!folio_trylock(folio))
+		__folio_lock(folio);
 }
 
 /*

mm/filemap.c

@@ -1242,7 +1242,7 @@ static void wake_up_page(struct page *page, int bit)
  */
 enum behavior {
 	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
-			 * __lock_page() waiting on then setting PG_locked.
+			 * __folio_lock() waiting on then setting PG_locked.
 			 */
 	SHARED,		/* Hold ref to page and check the bit when woken, like
 			 * wait_on_page_writeback() waiting on PG_writeback.
@@ -1633,17 +1633,16 @@ void page_endio(struct page *page, bool is_write, int err)
 EXPORT_SYMBOL_GPL(page_endio);
 
 /**
- * __lock_page - get a lock on the page, assuming we need to sleep to get it
- * @__page: the page to lock
+ * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
+ * @folio: The folio to lock
  */
-void __lock_page(struct page *__page)
+void __folio_lock(struct folio *folio)
 {
-	struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
-	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_UNINTERRUPTIBLE,
 				EXCLUSIVE);
 }
-EXPORT_SYMBOL(__lock_page);
+EXPORT_SYMBOL(__folio_lock);
 
 int __lock_page_killable(struct page *__page)
 {
@@ -1718,10 +1717,10 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			return 0;
 		}
 	} else {
-		__lock_page(page);
+		__folio_lock(page_folio(page));
 	}
-	return 1;
 
+	return 1;
 }
 
 /**
@@ -2915,7 +2914,9 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 				     struct file **fpin)
 {
-	if (trylock_page(page))
+	struct folio *folio = page_folio(page);
+
+	if (folio_trylock(folio))
 		return 1;
 
 	/*
@@ -2928,7 +2929,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
 	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
-		if (__lock_page_killable(page)) {
+		if (__lock_page_killable(&folio->page)) {
 			/*
 			 * We didn't have the right flags to drop the mmap_lock,
 			 * but all fault_handlers only check for fatal signals
@@ -2940,11 +2941,11 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 			return 0;
 		}
 	} else
-		__lock_page(page);
+		__folio_lock(folio);
+
 	return 1;
-
 }
 
 /*
  * Synchronous readahead happens when we don't even find a page in the page
  * cache at all.  We don't want to perform IO under the mmap sem, so if we have