mm: introduce compound_nr()
Replace 1 << compound_order(page) with compound_nr(page).  Minor
improvements in readability.

Link: http://lkml.kernel.org/r/20190721104612.19120-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 94ad933810
commit d8c6546b1a
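For readers skimming the conversions below: compound_nr() is defined in the
include/linux/mm.h hunk as 1UL << compound_order(page), so every replacement
in this patch is value-preserving. A minimal userspace sketch of that
identity follows; struct fake_page and the order-9 value are illustrative
stand-ins, not the kernel's real struct page:

	/* Illustrative only: models the arithmetic of compound_nr(). */
	#include <assert.h>
	#include <stdio.h>

	struct fake_page { unsigned int order; };

	static unsigned long compound_order(const struct fake_page *page)
	{
		return page->order;
	}

	/* Mirrors the helper added to include/linux/mm.h below. */
	static unsigned long compound_nr(const struct fake_page *page)
	{
		return 1UL << compound_order(page);
	}

	int main(void)
	{
		struct fake_page thp = { .order = 9 };	/* a 2MB THP on x86 is order 9 */

		/* An order-9 compound page spans 512 base pages. */
		assert(compound_nr(&thp) == 512);
		printf("order %u -> %lu base pages\n", thp.order, compound_nr(&thp));
		return 0;
	}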
@@ -208,13 +208,13 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
-			for (i = 0; i < (1 << compound_order(page)); i++) {
+			for (i = 0; i < compound_nr(page); i++) {
 				void *addr = kmap_atomic(page + i);
 				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 				kunmap_atomic(addr);
 			}
 		} else {
-			for (i = 0; i < (1 << compound_order(page)); i++) {
+			for (i = 0; i < compound_nr(page); i++) {
 				void *addr = kmap_high_get(page + i);
 				if (addr) {
 					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
@@ -667,7 +667,7 @@ void flush_dcache_icache_hugepage(struct page *page)

 	BUG_ON(!PageCompound(page));

-	for (i = 0; i < (1UL << compound_order(page)); i++) {
+	for (i = 0; i < compound_nr(page); i++) {
 		if (!PageHighMem(page)) {
 			__flush_dcache_icache(page_address(page+i));
 		} else {
@@ -461,7 +461,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
 		bool compound, bool young, bool dirty, bool locked)
 {
-	int i, nr = compound ? 1 << compound_order(page) : 1;
+	int i, nr = compound ? compound_nr(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;

 	/*
@@ -805,6 +805,12 @@ static inline void set_compound_order(struct page *page, unsigned int order)
 	page[1].compound_order = order;
 }

+/* Returns the number of pages in this potentially compound page. */
+static inline unsigned long compound_nr(struct page *page)
+{
+	return 1UL << compound_order(page);
+}
+
 /* Returns the number of bytes in this potentially compound page. */
 static inline unsigned long page_size(struct page *page)
 {
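The new helper lands directly above the existing page_size(), and the two
are linked by the identity page_size(page) == compound_nr(page) * PAGE_SIZE.
A hedged userspace sketch of that arithmetic, assuming 4KB base pages:

	/* Illustrative only: compound_nr() vs page_size() arithmetic. */
	#include <assert.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		unsigned int order;

		/* The identity holds for every order, not just THP sizes. */
		for (order = 0; order <= 10; order++) {
			unsigned long nr = 1UL << order;		/* compound_nr() */
			unsigned long size = PAGE_SIZE << order;	/* page_size() */

			assert(size == nr * PAGE_SIZE);
		}
		return 0;
	}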
@@ -969,7 +969,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * is safe to read and it's 0 for tail pages.
 		 */
 		if (unlikely(PageCompound(page))) {
-			low_pfn += (1UL << compound_order(page)) - 1;
+			low_pfn += compound_nr(page) - 1;
 			goto isolate_fail;
 		}
 	}
@@ -126,7 +126,7 @@ static void page_cache_delete(struct address_space *mapping,
 	/* hugetlb pages are represented by a single entry in the xarray */
 	if (!PageHuge(page)) {
 		xas_set_order(&xas, page->index, compound_order(page));
-		nr = 1U << compound_order(page);
+		nr = compound_nr(page);
 	}

 	VM_BUG_ON_PAGE(!PageLocked(page), page);
mm/gup.c | 2 +-
@@ -1460,7 +1460,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 		 * gup may start from a tail page. Advance step by the left
 		 * part.
 		 */
-		step = (1 << compound_order(head)) - (pages[i] - head);
+		step = compound_nr(head) - (pages[i] - head);
 		/*
 		 * If we get a page from the CMA zone, since we are going to
 		 * be pinning these entries, we might as well move them out
@@ -139,7 +139,7 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
 	if (!page_hcg || page_hcg != h_cg)
 		goto out;

-	nr_pages = 1 << compound_order(page);
+	nr_pages = compound_nr(page);
 	if (!parent) {
 		parent = root_h_cgroup;
 		/* root has no limit */
@@ -336,7 +336,7 @@ void kasan_poison_slab(struct page *page)
 {
 	unsigned long i;

-	for (i = 0; i < (1 << compound_order(page)); i++)
+	for (i = 0; i < compound_nr(page); i++)
 		page_kasan_tag_reset(page + i);
 	kasan_poison_shadow(page_address(page), page_size(page),
 			KASAN_KMALLOC_REDZONE);
@@ -6511,7 +6511,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 	unsigned int nr_pages = 1;

 	if (PageTransHuge(page)) {
-		nr_pages <<= compound_order(page);
+		nr_pages = compound_nr(page);
 		ug->nr_huge += nr_pages;
 	}
 	if (PageAnon(page))
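One conversion in this file is not purely textual: the old code built
nr_pages by shifting an initial value of 1 (nr_pages <<= compound_order(page)),
which yields the same number compound_nr() returns. A quick illustrative
check of that equivalence:

	/* Illustrative only: "x = 1; x <<= order" equals "1UL << order". */
	#include <assert.h>

	int main(void)
	{
		unsigned int order;

		for (order = 0; order < 16; order++) {
			unsigned long nr_pages = 1;

			nr_pages <<= order;			/* old form in uncharge_page() */
			assert(nr_pages == (1UL << order));	/* compound_nr() form */
		}
		return 0;
	}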
@@ -6523,7 +6523,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 		}
 		ug->pgpgout++;
 	} else {
-		ug->nr_kmem += 1 << compound_order(page);
+		ug->nr_kmem += compound_nr(page);
 		__ClearPageKmemcg(page);
 	}

@@ -1309,7 +1309,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 		head = compound_head(page);
 		if (page_huge_active(head))
 			return pfn;
-		skip = (1 << compound_order(head)) - (page - head);
+		skip = compound_nr(head) - (page - head);
 		pfn += skip - 1;
 	}
 	return 0;
@@ -1347,7 +1347,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)

 		if (PageHuge(page)) {
 			struct page *head = compound_head(page);
-			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
+			pfn = page_to_pfn(head) + compound_nr(head) - 1;
 			isolate_huge_page(head, &source);
 			continue;
 		} else if (PageTransHuge(page))
@@ -1892,7 +1892,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

 	/* Avoid migrating to a node that is nearly full */
-	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
+	if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
 		return 0;

 	if (isolate_lru_page(page))
@@ -8196,7 +8196,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 			if (!hugepage_migration_supported(page_hstate(head)))
 				goto unmovable;

-			skip_pages = (1 << compound_order(head)) - (page - head);
+			skip_pages = compound_nr(head) - (page - head);
 			iter += skip_pages - 1;
 			continue;
 		}
@@ -1520,8 +1520,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
 			if (PageHuge(page)) {
-				int nr = 1 << compound_order(page);
-				hugetlb_count_sub(nr, mm);
+				hugetlb_count_sub(compound_nr(page), mm);
 				set_huge_swap_pte_at(mm, address,
 						pvmw.pte, pteval,
 						vma_mmu_pagesize(vma));
@@ -609,7 +609,7 @@ static int shmem_add_to_page_cache(struct page *page,
 {
 	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
 	unsigned long i = 0;
-	unsigned long nr = 1UL << compound_order(page);
+	unsigned long nr = compound_nr(page);

 	VM_BUG_ON_PAGE(PageTail(page), page);
 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -1884,7 +1884,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		lru_cache_add_anon(page);

 		spin_lock_irq(&info->lock);
-		info->alloced += 1 << compound_order(page);
+		info->alloced += compound_nr(page);
 		inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
 		shmem_recalc_inode(inode);
 		spin_unlock_irq(&info->lock);
@@ -1925,7 +1925,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 			struct page *head = compound_head(page);
 			int i;

-			for (i = 0; i < (1 << compound_order(head)); i++) {
+			for (i = 0; i < compound_nr(head); i++) {
 				clear_highpage(head + i);
 				flush_dcache_page(head + i);
 			}
@@ -1952,7 +1952,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	 * Error recovery.
 	 */
 unacct:
-	shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
+	shmem_inode_unacct_blocks(inode, compound_nr(page));

 	if (PageTransHuge(page)) {
 		unlock_page(page);
@@ -116,7 +116,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
 	struct address_space *address_space = swap_address_space(entry);
 	pgoff_t idx = swp_offset(entry);
 	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-	unsigned long i, nr = 1UL << compound_order(page);
+	unsigned long i, nr = compound_nr(page);

 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -521,7 +521,7 @@ bool page_mapped(struct page *page)
 		return true;
 	if (PageHuge(page))
 		return false;
-	for (i = 0; i < (1 << compound_order(page)); i++) {
+	for (i = 0; i < compound_nr(page); i++) {
 		if (atomic_read(&page[i]._mapcount) >= 0)
 			return true;
 	}
@@ -1149,7 +1149,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,

 		VM_BUG_ON_PAGE(PageActive(page), page);

-		nr_pages = 1 << compound_order(page);
+		nr_pages = compound_nr(page);

 		/* Account the number of base pages even though THP */
 		sc->nr_scanned += nr_pages;
@@ -1705,7 +1705,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,

 		VM_BUG_ON_PAGE(!PageLRU(page), page);

-		nr_pages = 1 << compound_order(page);
+		nr_pages = compound_nr(page);
 		total_scan += nr_pages;

 		if (page_zonenum(page) > sc->reclaim_idx) {