[PATCH] kill last zone_reclaim() bits

Remove the last bits of Martin's ill-fated sys_set_zone_reclaim().

Cc: Martin Hicks <mort@wildopensource.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 7756b9e4e3 (parent bbfbb7cec9)
Author:    Andrew Morton <akpm@osdl.org>, 2006-01-06 00:11:09 -08:00
Committed: Linus Torvalds <torvalds@osdl.org>
4 changed files with 2 additions and 83 deletions

--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h

@@ -256,7 +256,7 @@
 #define __NR_io_submit 248
 #define __NR_io_cancel 249
 #define __NR_fadvise64 250
-#define __NR_set_zone_reclaim 251
+/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
 #define __NR_exit_group 252
 #define __NR_lookup_dcookie 253
 #define __NR_epoll_create 254

--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h

@@ -265,7 +265,7 @@
 #define __NR_keyctl 1273
 #define __NR_ioprio_set 1274
 #define __NR_ioprio_get 1275
-#define __NR_set_zone_reclaim 1276
+/* 1276 is available for reuse (was briefly sys_set_zone_reclaim) */
 #define __NR_inotify_init 1277
 #define __NR_inotify_add_watch 1278
 #define __NR_inotify_rm_watch 1279

--- a/include/linux/swap.h
+++ b/include/linux/swap.h

@@ -172,7 +172,6 @@ extern void swap_setup(void);
 /* linux/mm/vmscan.c */
 extern int try_to_free_pages(struct zone **, gfp_t);
-extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;

--- a/mm/vmscan.c
+++ b/mm/vmscan.c

@@ -74,9 +74,6 @@ struct scan_control {
 
         int may_writepage;
 
-        /* Can pages be swapped as part of reclaim? */
-        int may_swap;
-
         /* This context's SWAP_CLUSTER_MAX. If freeing memory for
          * suspend, we effectively ignore SWAP_CLUSTER_MAX.
          * In this context, it doesn't matter that we scan the
@@ -430,8 +427,6 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                  * Try to allocate it some swap space here.
                  */
                 if (PageAnon(page) && !PageSwapCache(page)) {
-                        if (!sc->may_swap)
-                                goto keep_locked;
                         if (!add_to_swap(page))
                                 goto activate_locked;
                 }
@@ -952,7 +947,6 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
         sc.gfp_mask = gfp_mask;
         sc.may_writepage = 0;
-        sc.may_swap = 1;
 
         inc_page_state(allocstall);
@@ -1055,7 +1049,6 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
         total_reclaimed = 0;
         sc.gfp_mask = GFP_KERNEL;
         sc.may_writepage = 0;
-        sc.may_swap = 1;
         sc.nr_mapped = read_page_state(nr_mapped);
 
         inc_page_state(pageoutrun);
@@ -1353,76 +1346,3 @@ static int __init kswapd_init(void)
 }
 
 module_init(kswapd_init)
-
-/*
- * Try to free up some pages from this zone through reclaim.
- */
-int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
-{
-        struct scan_control sc;
-        int nr_pages = 1 << order;
-        int total_reclaimed = 0;
-
-        /* The reclaim may sleep, so don't do it if sleep isn't allowed */
-        if (!(gfp_mask & __GFP_WAIT))
-                return 0;
-        if (zone->all_unreclaimable)
-                return 0;
-
-        sc.gfp_mask = gfp_mask;
-        sc.may_writepage = 0;
-        sc.may_swap = 0;
-        sc.nr_mapped = read_page_state(nr_mapped);
-        sc.nr_scanned = 0;
-        sc.nr_reclaimed = 0;
-        /* scan at the highest priority */
-        sc.priority = 0;
-        disable_swap_token();
-
-        if (nr_pages > SWAP_CLUSTER_MAX)
-                sc.swap_cluster_max = nr_pages;
-        else
-                sc.swap_cluster_max = SWAP_CLUSTER_MAX;
-
-        /* Don't reclaim the zone if there are other reclaimers active */
-        if (atomic_read(&zone->reclaim_in_progress) > 0)
-                goto out;
-
-        shrink_zone(zone, &sc);
-        total_reclaimed = sc.nr_reclaimed;
-
-out:
-        return total_reclaimed;
-}
-
-asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
-                                     unsigned int state)
-{
-        struct zone *z;
-        int i;
-
-        if (!capable(CAP_SYS_ADMIN))
-                return -EACCES;
-
-        if (node >= MAX_NUMNODES || !node_online(node))
-                return -EINVAL;
-
-        /* This will break if we ever add more zones */
-        if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM)))
-                return -EINVAL;
-
-        for (i = 0; i < MAX_NR_ZONES; i++) {
-                if (!(zone & 1<<i))
-                        continue;
-
-                z = &NODE_DATA(node)->node_zones[i];
-
-                if (state)
-                        z->reclaim_pages = 1;
-                else
-                        z->reclaim_pages = 0;
-        }
-
-        return 0;
-}