mm/filemap: pass a sleep state to put_and_wait_on_page_locked
This is prep work for the next patch, but I think at least one of the current callers would prefer a killable sleep to an uninterruptible one.

Link: https://lkml.kernel.org/r/20210122160140.223228-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Kent Overstreet <kent.overstreet@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 4805462598
parent cbd59c48ae
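The point of the new sleep-state argument is that a caller able to back out on a fatal signal no longer has to sleep uninterruptibly while waiting for the page lock. A minimal sketch of how such a caller would use it, with hypothetical naming and not part of this patch (it follows the same take-a-reference-then-drop-the-lock pattern as the call sites converted below):

/*
 * Hypothetical helper, for illustration only.  'ptl' is the page table
 * lock under which 'page' was found.
 */
static int example_wait_for_busy_page(struct page *page,
				      spinlock_t *ptl, int state)
{
	/* Pin the page while the lock still guarantees it is valid. */
	if (!get_page_unless_zero(page)) {
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);
	/*
	 * Hands our reference over to the wait machinery so migration is
	 * not held up while we sleep; 'page' must not be touched after
	 * this call.  Returns 0 once the page is unlocked, or -EINTR if
	 * interrupted by a signal.
	 */
	return put_and_wait_on_page_locked(page, state);
}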
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -681,8 +681,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
 	return wait_on_page_bit_killable(compound_head(page), PG_locked);
 }
 
-extern void put_and_wait_on_page_locked(struct page *page);
-
+int put_and_wait_on_page_locked(struct page *page, int state);
 void wait_on_page_writeback(struct page *page);
 extern void end_page_writeback(struct page *page);
 void wait_for_stable_page(struct page *page);
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1384,20 +1384,23 @@ static int wait_on_page_locked_async(struct page *page,
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
  * @page: The page to wait for.
+ * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
  *
  * The caller should hold a reference on @page.  They expect the page to
  * become unlocked relatively soon, but do not wish to hold up migration
  * (for example) by holding the reference while waiting for the page to
  * come unlocked.  After this function returns, the caller should not
  * dereference @page.
+ *
+ * Return: 0 if the page was unlocked or -EINTR if interrupted by a signal.
  */
-void put_and_wait_on_page_locked(struct page *page)
+int put_and_wait_on_page_locked(struct page *page, int state)
 {
 	wait_queue_head_t *q;
 
 	page = compound_head(page);
 	q = page_waitqueue(page);
-	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP);
+	return wait_on_page_bit_common(q, page, PG_locked, state, DROP);
 }
 
 /**
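Since the implementation just forwards the caller's state to wait_on_page_bit_common() and passes its result back, the contract is exactly what the new kernel-doc says. A small illustrative caller (hypothetical, not from this patch) showing how the two common states differ:

/* Illustrative only: the reference on 'page' is consumed in either branch. */
static int example_wait(struct page *page, bool killable)
{
	if (!killable) {
		/* Cannot fail: an uninterruptible sleep ignores signals. */
		put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
		return 0;
	}
	/* May return -EINTR if a fatal signal arrives before the page unlocks. */
	return put_and_wait_on_page_locked(page, TASK_KILLABLE);
}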
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1439,7 +1439,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 		if (!get_page_unless_zero(page))
 			goto out_unlock;
 		spin_unlock(vmf->ptl);
-		put_and_wait_on_page_locked(page);
+		put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 		goto out;
 	}
 
@@ -1475,7 +1475,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 		if (!get_page_unless_zero(page))
 			goto out_unlock;
 		spin_unlock(vmf->ptl);
-		put_and_wait_on_page_locked(page);
+		put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 		goto out;
 	}
 
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -331,7 +331,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 	if (!get_page_unless_zero(page))
 		goto out;
 	pte_unmap_unlock(ptep, ptl);
-	put_and_wait_on_page_locked(page);
+	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 	return;
 out:
 	pte_unmap_unlock(ptep, ptl);
@@ -365,7 +365,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 	if (!get_page_unless_zero(page))
 		goto unlock;
 	spin_unlock(ptl);
-	put_and_wait_on_page_locked(page);
+	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 	return;
 unlock:
 	spin_unlock(ptl);
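All four call sites updated above pass TASK_UNINTERRUPTIBLE, so their behaviour is unchanged: for an uninterruptible sleep the new return value is always 0 and is deliberately ignored. The conversion at these call sites is therefore purely mechanical:

	/* before this patch */
	put_and_wait_on_page_locked(page);

	/* after this patch: same behaviour, the sleep state is now explicit */
	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);

A caller that actually wants the killable behaviour the changelog alludes to would pass TASK_KILLABLE instead and check for -EINTR, as sketched after the commit message above.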