mirror of https://gitee.com/openkylin/linux.git
mm, rmap: handle anon_vma_prepare() common case inline
anon_vma_prepare() is mostly a large "if (unlikely(...))" block, as the expected common case is that an anon_vma already exists. We could turn the condition around and return 0, but it also makes sense to do it inline and avoid a call for the common case.

Bloat-o-meter naturally shows that inlining the check has some code size costs:

add/remove: 1/1 grow/shrink: 4/0 up/down: 475/-373 (102)
function                old     new   delta
__anon_vma_prepare        -     359    +359
handle_mm_fault        2744    2796     +52
hugetlb_cow            1146    1170     +24
hugetlb_fault          2123    2145     +22
wp_page_copy           1469    1487     +18
anon_vma_prepare        373       -    -373

Checking the asm however confirms that the hot paths now avoid a call, which is moved away.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20161116074005.22768-1-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 46e8a3a08c
commit d5a187daf5
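As a rough illustration of the pattern this patch applies (a sketch only, not the kernel code: the toy_* names, malloc() and the userspace build are invented for the example), the fast path becomes an inline pointer test, and only a still-NULL pointer falls through to an out-of-line slow path:

#include <stddef.h>
#include <stdlib.h>

/* Toy stand-ins for the kernel structures; names are illustrative only. */
struct toy_anon_vma { int refcount; };
struct toy_vma { struct toy_anon_vma *anon_vma; };

/* Branch hint in the spirit of the kernel's likely()/unlikely(). */
#define toy_likely(x) __builtin_expect(!!(x), 1)

/* Out-of-line slow path: only reached when no anon_vma is attached yet. */
int toy_anon_vma_prepare_slowpath(struct toy_vma *vma)
{
        struct toy_anon_vma *av = malloc(sizeof(*av));

        if (!av)
                return -1;              /* -ENOMEM in the kernel */
        av->refcount = 1;
        vma->anon_vma = av;
        return 0;
}

/* Inline fast path: the common case is a single pointer test, no call. */
static inline int toy_anon_vma_prepare(struct toy_vma *vma)
{
        if (toy_likely(vma->anon_vma))
                return 0;

        return toy_anon_vma_prepare_slowpath(vma);
}

int main(void)
{
        struct toy_vma vma = { .anon_vma = NULL };

        /* First call takes the slow path; repeated calls stay inline. */
        if (toy_anon_vma_prepare(&vma))
                return 1;
        return toy_anon_vma_prepare(&vma);
}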
include/linux/rmap.h

@@ -137,11 +137,19 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
  * anon_vma helper functions.
  */
 void anon_vma_init(void);      /* create anon_vma_cachep */
-int  anon_vma_prepare(struct vm_area_struct *);
+int  __anon_vma_prepare(struct vm_area_struct *);
 void unlink_anon_vmas(struct vm_area_struct *);
 int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
 int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
 
+static inline int anon_vma_prepare(struct vm_area_struct *vma)
+{
+       if (likely(vma->anon_vma))
+               return 0;
+
+       return __anon_vma_prepare(vma);
+}
+
 static inline void anon_vma_merge(struct vm_area_struct *vma,
                struct vm_area_struct *next)
 {
mm/rmap.c
@@ -141,14 +141,15 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
 }
 
 /**
- * anon_vma_prepare - attach an anon_vma to a memory region
+ * __anon_vma_prepare - attach an anon_vma to a memory region
  * @vma: the memory region in question
  *
  * This makes sure the memory mapping described by 'vma' has
  * an 'anon_vma' attached to it, so that we can associate the
  * anonymous pages mapped into it with that anon_vma.
  *
- * The common case will be that we already have one, but if
+ * The common case will be that we already have one, which
+ * is handled inline by anon_vma_prepare(). But if
  * not we either need to find an adjacent mapping that we
  * can re-use the anon_vma from (very common when the only
  * reason for splitting a vma has been mprotect()), or we
@@ -167,15 +168,13 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
  *
  * This must be called with the mmap_sem held for reading.
  */
-int anon_vma_prepare(struct vm_area_struct *vma)
+int __anon_vma_prepare(struct vm_area_struct *vma)
 {
-       struct anon_vma *anon_vma = vma->anon_vma;
+       struct mm_struct *mm = vma->vm_mm;
+       struct anon_vma *anon_vma, *allocated;
        struct anon_vma_chain *avc;
 
        might_sleep();
-       if (unlikely(!anon_vma)) {
-               struct mm_struct *mm = vma->vm_mm;
-               struct anon_vma *allocated;
 
        avc = anon_vma_chain_alloc(GFP_KERNEL);
        if (!avc)
@@ -208,7 +207,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                        put_anon_vma(allocated);
                if (unlikely(avc))
                        anon_vma_chain_free(avc);
-       }
+
        return 0;
 
  out_enomem_free_avc:
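The unlikely() cleanup branches kept in the last hunk (put_anon_vma()/anon_vma_chain_free()) are needed because the new inline fast path reads vma->anon_vma without any lock: the slow path therefore allocates first, re-checks the pointer under mm->page_table_lock, and drops its allocations if another thread attached an anon_vma in the meantime. A rough userspace sketch of that allocate-then-recheck shape (the toy_* names and the pthread mutex are stand-ins for the kernel's types and spinlock, and memory-ordering details are glossed over):

#include <pthread.h>
#include <stdlib.h>

/* Toy stand-ins; names are invented for this sketch. */
struct toy_anon_vma { int refcount; };
struct toy_mm { pthread_mutex_t page_table_lock; };
struct toy_vma { struct toy_mm *mm; struct toy_anon_vma *anon_vma; };

/*
 * Slow-path shape: allocate outside the lock, then re-check vma->anon_vma
 * under the lock, because another thread may have attached one after our
 * lockless fast-path test; if so, discard what we allocated.
 */
int toy_anon_vma_prepare_slowpath(struct toy_vma *vma)
{
        struct toy_anon_vma *allocated = malloc(sizeof(*allocated));

        if (!allocated)
                return -1;
        allocated->refcount = 1;

        pthread_mutex_lock(&vma->mm->page_table_lock);
        if (!vma->anon_vma) {
                vma->anon_vma = allocated;      /* we won the race */
                allocated = NULL;
        }
        pthread_mutex_unlock(&vma->mm->page_table_lock);

        free(allocated);        /* NULL (no-op) if we won, the loser's copy otherwise */
        return 0;
}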