mirror of https://gitee.com/openkylin/linux.git
mm/swapfile.c: put_swap_page: share more between huge/normal code path
In this patch, locking related code is shared between huge/normal code path in put_swap_page() to reduce code duplication. The `free_entries == 0` case is merged into the more general `free_entries != SWAPFILE_CLUSTER` case, because the new locking method makes it easy. The added lines is same as the removed lines. But the code size is increased when CONFIG_TRANSPARENT_HUGEPAGE=n. text data bss dec hex filename base: 24123 2004 340 26467 6763 mm/swapfile.o unified: 24485 2004 340 26829 68cd mm/swapfile.o Dig on step deeper with `size -A mm/swapfile.o` for base and unified kernel and compare the result, yields, -.text 17723 0 +.text 17835 0 -.orc_unwind_ip 1380 0 +.orc_unwind_ip 1480 0 -.orc_unwind 2070 0 +.orc_unwind 2220 0 -Total 26686 +Total 27048 The total difference is the same. The text segment difference is much smaller: 112. More difference comes from the ORC unwinder segments: (1480 + 2220) - (1380 + 2070) = 250. If the frame pointer unwinder is used, this costs nothing. Link: http://lkml.kernel.org/r/20180720071845.17920-9-ying.huang@intel.com Signed-off-by: "Huang, Ying" <ying.huang@intel.com> Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com> Acked-by: Dave Hansen <dave.hansen@linux.intel.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Shaohua Li <shli@kernel.org> Cc: Hugh Dickins <hughd@google.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Rik van Riel <riel@redhat.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Matthew Wilcox <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
b32d5f32b9
commit
c2343d2761
|
@@ -1223,8 +1223,8 @@ void put_swap_page(struct page *page, swp_entry_t entry)
 	if (!si)
 		return;
 
+	ci = lock_cluster_or_swap_info(si, offset);
 	if (size == SWAPFILE_CLUSTER) {
-		ci = lock_cluster(si, offset);
 		VM_BUG_ON(!cluster_is_huge(ci));
 		map = si->swap_map + offset;
 		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
@@ -1233,13 +1233,9 @@ void put_swap_page(struct page *page, swp_entry_t entry)
 			if (val == SWAP_HAS_CACHE)
 				free_entries++;
 		}
-		if (!free_entries) {
-			for (i = 0; i < SWAPFILE_CLUSTER; i++)
-				map[i] &= ~SWAP_HAS_CACHE;
-		}
 		cluster_clear_huge(ci);
-		unlock_cluster(ci);
 		if (free_entries == SWAPFILE_CLUSTER) {
+			unlock_cluster_or_swap_info(si, ci);
 			spin_lock(&si->lock);
 			ci = lock_cluster(si, offset);
 			memset(map, 0, SWAPFILE_CLUSTER);
@@ -1250,12 +1246,16 @@ void put_swap_page(struct page *page, swp_entry_t entry)
 			return;
 		}
 	}
-	if (size == 1 || free_entries) {
-		for (i = 0; i < size; i++, entry.val++) {
-			if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
-				free_swap_slot(entry);
-		}
+	for (i = 0; i < size; i++, entry.val++) {
+		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
+			unlock_cluster_or_swap_info(si, ci);
+			free_swap_slot(entry);
+			if (i == size - 1)
+				return;
+			lock_cluster_or_swap_info(si, offset);
+		}
 	}
+	unlock_cluster_or_swap_info(si, ci);
 }
 
 #ifdef CONFIG_THP_SWAP
Loading…
Reference in New Issue