mm: clean up zone flags
Page reclaim tests zone_is_reclaim_dirty(), but the site that actually
sets this state does zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY), sending
the reader through layers of indirection just to track down a simple bit.

Remove all zone flag wrappers and just use bitops against zone->flags
directly.  It's just as readable and the lines are barely any longer.

Also rename ZONE_TAIL_LRU_DIRTY to ZONE_DIRTY to match ZONE_WRITEBACK,
and remove the zone_flags_t typedef.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 5705465174 (parent 7c809968ff)
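For readers following along outside the kernel tree, here is a minimal standalone sketch of the idiom the patch converges on: named flag bits manipulated directly on an unsigned long flags word. The enum and struct mirror the patch below, but the set_bit()/clear_bit()/test_bit() stand-ins built on compiler __atomic builtins are an assumption of this sketch, not the kernel's bitops implementation.

/*
 * Standalone sketch (not kernel code) of the post-cleanup idiom:
 * bitops used directly against a flags word.  The three helpers are
 * userspace stand-ins for the kernel's set_bit()/clear_bit()/test_bit(),
 * built on GCC/Clang __atomic builtins -- an assumption of this sketch.
 */
#include <stdio.h>

enum zone_flags {
	ZONE_RECLAIM_LOCKED,	/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,	/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,		/* many dirty pages backed by a congested BDI */
	ZONE_DIRTY,		/* reclaim found many dirty pages at the LRU tail */
	ZONE_WRITEBACK,		/* many pages under writeback */
	ZONE_FAIR_DEPLETED,	/* fair zone policy batch depleted */
};

struct zone {
	unsigned long flags;
};

static void set_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

static void clear_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

static int test_bit(int nr, const unsigned long *addr)
{
	return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
}

int main(void)
{
	struct zone zone = { .flags = 0 };

	/* One readable bitop replaces zone_set_flag()/zone_is_reclaim_*(). */
	set_bit(ZONE_DIRTY, &zone.flags);
	printf("dirty: %d\n", test_bit(ZONE_DIRTY, &zone.flags));	/* 1 */
	clear_bit(ZONE_DIRTY, &zone.flags);
	printf("dirty: %d\n", test_bit(ZONE_DIRTY, &zone.flags));	/* 0 */
	return 0;
}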
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -521,13 +521,13 @@ struct zone {
 	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 } ____cacheline_internodealigned_in_smp;
 
-typedef enum {
+enum zone_flags {
 	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
 	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
 	ZONE_CONGESTED,			/* zone has many dirty pages backed by
 					 * a congested BDI
 					 */
-	ZONE_TAIL_LRU_DIRTY,		/* reclaim scanning has recently found
+	ZONE_DIRTY,			/* reclaim scanning has recently found
 					 * many dirty file pages at the tail
 					 * of the LRU.
 					 */
@@ -535,52 +535,7 @@ typedef enum {
 					 * many pages under writeback
 					 */
 	ZONE_FAIR_DEPLETED,		/* fair zone policy batch depleted */
-} zone_flags_t;
-
-static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
-{
-	set_bit(flag, &zone->flags);
-}
-
-static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
-{
-	return test_and_set_bit(flag, &zone->flags);
-}
-
-static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
-{
-	clear_bit(flag, &zone->flags);
-}
-
-static inline int zone_is_reclaim_congested(const struct zone *zone)
-{
-	return test_bit(ZONE_CONGESTED, &zone->flags);
-}
-
-static inline int zone_is_reclaim_dirty(const struct zone *zone)
-{
-	return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
-}
-
-static inline int zone_is_reclaim_writeback(const struct zone *zone)
-{
-	return test_bit(ZONE_WRITEBACK, &zone->flags);
-}
-
-static inline int zone_is_reclaim_locked(const struct zone *zone)
-{
-	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
-}
-
-static inline int zone_is_fair_depleted(const struct zone *zone)
-{
-	return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
-}
-
-static inline int zone_is_oom_locked(const struct zone *zone)
-{
-	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
-}
+};
 
 static inline unsigned long zone_end_pfn(const struct zone *zone)
 {
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -631,7 +631,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
 	 * of sleeping on the congestion queue
 	 */
 	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
-	    !zone_is_reclaim_congested(zone)) {
+	    !test_bit(ZONE_CONGESTED, &zone->flags)) {
 		cond_resched();
 
 		/* In case we scheduled, work out time remaining */
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -565,7 +565,7 @@ bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
 
 	spin_lock(&zone_scan_lock);
 	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
-		if (zone_is_oom_locked(zone)) {
+		if (test_bit(ZONE_OOM_LOCKED, &zone->flags)) {
 			ret = false;
 			goto out;
 		}
@@ -575,7 +575,7 @@ bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
 	 * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
 	 */
 	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
-		zone_set_flag(zone, ZONE_OOM_LOCKED);
+		set_bit(ZONE_OOM_LOCKED, &zone->flags);
 
 out:
 	spin_unlock(&zone_scan_lock);
@@ -594,7 +594,7 @@ void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
 
 	spin_lock(&zone_scan_lock);
 	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
-		zone_clear_flag(zone, ZONE_OOM_LOCKED);
+		clear_bit(ZONE_OOM_LOCKED, &zone->flags);
 	spin_unlock(&zone_scan_lock);
 }
 
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1614,8 +1614,8 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 
 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
 	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
-	    !zone_is_fair_depleted(zone))
-		zone_set_flag(zone, ZONE_FAIR_DEPLETED);
+	    !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
+		set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
@@ -1935,7 +1935,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
 		mod_zone_page_state(zone, NR_ALLOC_BATCH,
 			high_wmark_pages(zone) - low_wmark_pages(zone) -
 			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
-		zone_clear_flag(zone, ZONE_FAIR_DEPLETED);
+		clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
 	} while (zone++ != preferred_zone);
 }
 
@@ -1986,7 +1986,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 		if (alloc_flags & ALLOC_FAIR) {
 			if (!zone_local(preferred_zone, zone))
 				break;
-			if (zone_is_fair_depleted(zone)) {
+			if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
 				nr_fair_skipped++;
 				continue;
 			}
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -920,7 +920,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			/* Case 1 above */
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
-			    zone_is_reclaim_writeback(zone)) {
+			    test_bit(ZONE_WRITEBACK, &zone->flags)) {
 				nr_immediate++;
 				goto keep_locked;
 
@@ -1002,7 +1002,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		if (page_is_file_cache(page) &&
 		    (!current_is_kswapd() ||
-		     !zone_is_reclaim_dirty(zone))) {
+		     !test_bit(ZONE_DIRTY, &zone->flags))) {
 			/*
 			 * Immediately reclaim when written back.
 			 * Similar in principal to deactivate_page()
@@ -1563,7 +1563,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * are encountered in the nr_immediate check below.
 	 */
 	if (nr_writeback && nr_writeback == nr_taken)
-		zone_set_flag(zone, ZONE_WRITEBACK);
+		set_bit(ZONE_WRITEBACK, &zone->flags);
 
 	/*
 	 * memcg will stall in page writeback so only consider forcibly
@@ -1575,16 +1575,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * backed by a congested BDI and wait_iff_congested will stall.
 	 */
 	if (nr_dirty && nr_dirty == nr_congested)
-		zone_set_flag(zone, ZONE_CONGESTED);
+		set_bit(ZONE_CONGESTED, &zone->flags);
 
 	/*
 	 * If dirty pages are scanned that are not queued for IO, it
 	 * implies that flushers are not keeping up. In this case, flag
-	 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
-	 * pages from reclaim context.
+	 * the zone ZONE_DIRTY and kswapd will start writing pages from
+	 * reclaim context.
 	 */
 	if (nr_unqueued_dirty == nr_taken)
-		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+		set_bit(ZONE_DIRTY, &zone->flags);
 
 	/*
 	 * If kswapd scans pages marked marked for immediate
@@ -2984,7 +2984,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	/* Account for the number of pages attempted to reclaim */
 	*nr_attempted += sc->nr_to_reclaim;
 
-	zone_clear_flag(zone, ZONE_WRITEBACK);
+	clear_bit(ZONE_WRITEBACK, &zone->flags);
 
 	/*
 	 * If a zone reaches its high watermark, consider it to be no longer
@@ -2994,8 +2994,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	 */
 	if (zone_reclaimable(zone) &&
 	    zone_balanced(zone, testorder, 0, classzone_idx)) {
-		zone_clear_flag(zone, ZONE_CONGESTED);
-		zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+		clear_bit(ZONE_CONGESTED, &zone->flags);
+		clear_bit(ZONE_DIRTY, &zone->flags);
 	}
 
 	return sc->nr_scanned >= sc->nr_to_reclaim;
@@ -3086,8 +3086,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				 * If balanced, clear the dirty and congested
 				 * flags
 				 */
-				zone_clear_flag(zone, ZONE_CONGESTED);
-				zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+				clear_bit(ZONE_CONGESTED, &zone->flags);
+				clear_bit(ZONE_DIRTY, &zone->flags);
 			}
 		}
 
@@ -3714,11 +3714,11 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
 		return ZONE_RECLAIM_NOSCAN;
 
-	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+	if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
 		return ZONE_RECLAIM_NOSCAN;
 
 	ret = __zone_reclaim(zone, gfp_mask, order);
-	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+	clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
 
 	if (!ret)
 		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
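The zone_reclaim() hunk above also shows why test_and_set_bit() survives the cleanup: its atomic read-modify-write doubles as a trylock on ZONE_RECLAIM_LOCKED. Below is a minimal standalone sketch of that idiom; the RECLAIM_LOCKED name, the zone_reclaim_once() helper, and the __atomic-builtin stand-ins for the kernel bitops are all assumptions of the sketch.

/*
 * Standalone sketch (not kernel code) of the trylock idiom kept in
 * zone_reclaim(): test_and_set_bit() atomically claims a flag bit, so
 * only one caller proceeds; clear_bit() releases it.
 */
#include <stdio.h>

enum { RECLAIM_LOCKED };	/* hypothetical flag bit for this sketch */

static int test_and_set_bit(int nr, unsigned long *addr)
{
	unsigned long old = __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_ACQUIRE);
	return (old >> nr) & 1;	/* nonzero if the bit was already set */
}

static void clear_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELEASE);
}

static unsigned long zone_flags;

static int zone_reclaim_once(void)
{
	if (test_and_set_bit(RECLAIM_LOCKED, &zone_flags))
		return -1;	/* another caller holds the bit: the NOSCAN path */

	/* ... the __zone_reclaim() work would run here ... */

	clear_bit(RECLAIM_LOCKED, &zone_flags);
	return 0;
}

int main(void)
{
	printf("uncontended: %d\n", zone_reclaim_once());	/* 0: got the bit */

	test_and_set_bit(RECLAIM_LOCKED, &zone_flags);		/* simulate a holder */
	printf("contended:   %d\n", zone_reclaim_once());	/* -1: locked out */
	return 0;
}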