Merge branch 'akpm'

Quoth Andrew:
  "Random fixes.  And a simple new LED driver which I'm trying to sneak
   in while you're not looking."

Sneaking successful.

* akpm:
  score: fix off-by-one index into syscall table
  mm: fix rss count leakage during migration
  SHM_UNLOCK: fix Unevictable pages stranded after swap
  SHM_UNLOCK: fix long unpreemptible section
  kdump: define KEXEC_NOTE_BYTES arch specific for s390x
  mm/hugetlb.c: undo change to page mapcount in fault handler
  mm: memcg: update the correct soft limit tree during migration
  proc: clear_refs: do not clear reserved pages
  drivers/video/backlight/l4f00242t03.c: return proper error in l4f00242t03_probe if regulator_get() fails
  drivers/video/backlight/adp88x0_bl.c: fix bit testing logic
  kprobes: initialize before using a hlist
  ipc/mqueue: simplify reading msgqueue limit
  leds: add led driver for Bachmann's ot200
  mm: __count_immobile_pages(): make sure the node is online
  mm: fix NULL ptr dereference in __count_immobile_pages
  mm: fix warnings regarding enum migrate_mode
Linus Torvalds 2012-01-23 09:27:54 -08:00
commit 4f57d865f1
24 changed files with 389 additions and 145 deletions

arch/s390/include/asm/kexec.h

@@ -42,6 +42,24 @@
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_S390
 
+/*
+ * Size for s390x ELF notes per CPU
+ *
+ * Seven notes plus zero note at the end: prstatus, fpregset, timer,
+ * tod_cmp, tod_reg, control regs, and prefix
+ */
+#define KEXEC_NOTE_BYTES \
+	(ALIGN(sizeof(struct elf_note), 4) * 8 + \
+	 ALIGN(sizeof("CORE"), 4) * 7 + \
+	 ALIGN(sizeof(struct elf_prstatus), 4) + \
+	 ALIGN(sizeof(elf_fpregset_t), 4) + \
+	 ALIGN(sizeof(u64), 4) + \
+	 ALIGN(sizeof(u64), 4) + \
+	 ALIGN(sizeof(u32), 4) + \
+	 ALIGN(sizeof(u64) * 16, 4) + \
+	 ALIGN(sizeof(u32), 4) \
+	)
+
 /* Provide a dummy definition to avoid build failures. */
 static inline void crash_setup_regs(struct pt_regs *newregs,
 					struct pt_regs *oldregs) { }
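The macro above just sums eight 4-byte-aligned ELF note pieces: seven named notes (a 12-byte header, the "CORE" name, then the payload, each part padded to 4 bytes) plus one empty terminating header. A stand-alone sketch of that arithmetic, with made-up payload sizes standing in for sizeof(struct elf_prstatus) and friends:

#include <stdio.h>
#include <stddef.h>

#define ALIGN4(x) (((x) + 3UL) & ~3UL)

/* One ELF note: 12-byte header (namesz, descsz, type), then the name,
 * then the descriptor, each padded to a 4-byte boundary. */
static size_t note_bytes(size_t name_len, size_t desc_len)
{
	return ALIGN4(3 * sizeof(unsigned int)) + ALIGN4(name_len) +
	       ALIGN4(desc_len);
}

int main(void)
{
	/* Illustrative payload sizes only; the kernel uses the real sizes
	 * of elf_prstatus, elf_fpregset_t, two u64s, two u32s and the
	 * sixteen 64-bit control registers on the target architecture. */
	size_t desc[7] = { 336, 512, 8, 8, 4, 16 * 8, 4 };
	size_t total = 0;
	int i;

	for (i = 0; i < 7; i++)
		total += note_bytes(sizeof("CORE"), desc[i]);
	total += ALIGN4(3 * sizeof(unsigned int));	/* empty "zero" note */
	printf("ELF note bytes per CPU: %zu\n", total);
	return 0;
}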

arch/score/kernel/entry.S

@@ -408,7 +408,7 @@ ENTRY(handle_sys)
 	sw	r9, [r0, PT_EPC]
 
 	cmpi.c	r27, __NR_syscalls	# check syscall number
-	bgtu	illegal_syscall
+	bgeu	illegal_syscall
 
 	slli	r8, r27, 2		# get syscall routine
 	la	r11, sys_call_table
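The one-instruction fix is the classic off-by-one: a table of __NR_syscalls entries has valid indices 0..__NR_syscalls-1, so an index equal to __NR_syscalls must already be rejected. "Branch if greater" (bgtu) let it through; "branch if greater or equal" (bgeu) rejects it. The same bounds check in a C sketch (illustrative names, not kernel code):

#include <stdio.h>

#define NR_SYSCALLS 3

typedef long (*syscall_fn)(void);

static long sys_a(void) { return 0; }
static long sys_b(void) { return 1; }
static long sys_c(void) { return 2; }

static syscall_fn sys_call_table[NR_SYSCALLS] = { sys_a, sys_b, sys_c };

static long do_syscall(unsigned int nr)
{
	/* ">= NR_SYSCALLS" is the correct guard; "> NR_SYSCALLS" would let
	 * nr == NR_SYSCALLS index one entry past the end of the table. */
	if (nr >= NR_SYSCALLS)
		return -1;	/* -ENOSYS in the real kernel */
	return sys_call_table[nr]();
}

int main(void)
{
	printf("%ld\n", do_syscall(3));	/* rejected: out of bounds */
	return 0;
}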

drivers/leds/Kconfig

@@ -403,6 +403,13 @@ config LEDS_MAX8997
 	  This option enables support for on-chip LED drivers on
 	  MAXIM MAX8997 PMIC.
 
+config LEDS_OT200
+	tristate "LED support for the Bachmann OT200"
+	depends on LEDS_CLASS && HAS_IOMEM
+	help
+	  This option enables support for the LEDs on the Bachmann OT200.
+	  Say Y to enable LEDs on the Bachmann OT200.
+
 config LEDS_TRIGGERS
 	bool "LED Trigger support"
 	depends on LEDS_CLASS

drivers/leds/Makefile

@@ -28,6 +28,7 @@ obj-$(CONFIG_LEDS_LP5523)	+= leds-lp5523.o
 obj-$(CONFIG_LEDS_TCA6507)	+= leds-tca6507.o
 obj-$(CONFIG_LEDS_CLEVO_MAIL)	+= leds-clevo-mail.o
 obj-$(CONFIG_LEDS_HP6XX)	+= leds-hp6xx.o
+obj-$(CONFIG_LEDS_OT200)	+= leds-ot200.o
 obj-$(CONFIG_LEDS_FSG)		+= leds-fsg.o
 obj-$(CONFIG_LEDS_PCA955X)	+= leds-pca955x.o
 obj-$(CONFIG_LEDS_DA903X)	+= leds-da903x.o

drivers/leds/leds-ot200.c (new file, 171 lines)

@@ -0,0 +1,171 @@
/*
 * Bachmann ot200 leds driver.
 *
 * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *         Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * License: GPL as published by the FSF.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <linux/io.h>
#include <linux/module.h>

struct ot200_led {
	struct led_classdev cdev;
	const char *name;
	unsigned long port;
	u8 mask;
};

/*
 * The device has three leds on the back panel (led_err, led_init and led_run)
 * and can handle up to seven leds on the front panel.
 */
static struct ot200_led leds[] = {
	{
		.name = "led_run",
		.port = 0x5a,
		.mask = BIT(0),
	},
	{
		.name = "led_init",
		.port = 0x5a,
		.mask = BIT(1),
	},
	{
		.name = "led_err",
		.port = 0x5a,
		.mask = BIT(2),
	},
	{
		.name = "led_1",
		.port = 0x49,
		.mask = BIT(7),
	},
	{
		.name = "led_2",
		.port = 0x49,
		.mask = BIT(6),
	},
	{
		.name = "led_3",
		.port = 0x49,
		.mask = BIT(5),
	},
	{
		.name = "led_4",
		.port = 0x49,
		.mask = BIT(4),
	},
	{
		.name = "led_5",
		.port = 0x49,
		.mask = BIT(3),
	},
	{
		.name = "led_6",
		.port = 0x49,
		.mask = BIT(2),
	},
	{
		.name = "led_7",
		.port = 0x49,
		.mask = BIT(1),
	}
};

static DEFINE_SPINLOCK(value_lock);

/*
 * we need to store the current led states, as it is not
 * possible to read the current led state via inb().
 */
static u8 leds_back;
static u8 leds_front;

static void ot200_led_brightness_set(struct led_classdev *led_cdev,
		enum led_brightness value)
{
	struct ot200_led *led = container_of(led_cdev, struct ot200_led, cdev);
	u8 *val;
	unsigned long flags;

	spin_lock_irqsave(&value_lock, flags);

	if (led->port == 0x49)
		val = &leds_front;
	else if (led->port == 0x5a)
		val = &leds_back;
	else
		BUG();

	if (value == LED_OFF)
		*val &= ~led->mask;
	else
		*val |= led->mask;

	outb(*val, led->port);
	spin_unlock_irqrestore(&value_lock, flags);
}

static int __devinit ot200_led_probe(struct platform_device *pdev)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(leds); i++) {
		leds[i].cdev.name = leds[i].name;
		leds[i].cdev.brightness_set = ot200_led_brightness_set;

		ret = led_classdev_register(&pdev->dev, &leds[i].cdev);
		if (ret < 0)
			goto err;
	}

	leds_front = 0;		/* turn off all front leds */
	leds_back = BIT(1);	/* turn on init led */
	outb(leds_front, 0x49);
	outb(leds_back, 0x5a);

	return 0;

err:
	for (i = i - 1; i >= 0; i--)
		led_classdev_unregister(&leds[i].cdev);

	return ret;
}

static int __devexit ot200_led_remove(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(leds); i++)
		led_classdev_unregister(&leds[i].cdev);

	return 0;
}

static struct platform_driver ot200_led_driver = {
	.probe		= ot200_led_probe,
	.remove		= __devexit_p(ot200_led_remove),
	.driver		= {
		.name	= "leds-ot200",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(ot200_led_driver);

MODULE_AUTHOR("Sebastian A. Siewior <bigeasy@linutronix.de>");
MODULE_DESCRIPTION("ot200 LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:leds-ot200");
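The driver only binds when a platform device named "leds-ot200" exists, so board-support code has to register one; that registration is not part of this patch. A minimal sketch of what it might look like (hypothetical initcall name):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

/* Hypothetical board-setup snippet: creates the device that
 * ot200_led_driver's .driver.name will match against. */
static struct platform_device *ot200_leds_pdev;

static int __init ot200_leds_dev_init(void)
{
	ot200_leds_pdev = platform_device_register_simple("leds-ot200",
							  -1, NULL, 0);
	if (IS_ERR(ot200_leds_pdev))
		return PTR_ERR(ot200_leds_pdev);
	return 0;
}
device_initcall(ot200_leds_dev_init);

Once probe succeeds, each led_classdev_register() call exposes an LED under /sys/class/leds/<name>/brightness (led_run, led_init, led_err, led_1..led_7).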

drivers/video/backlight/adp8860_bl.c

@@ -146,7 +146,7 @@ static int adp8860_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask
 
 	ret = adp8860_read(client, reg, &reg_val);
 
-	if (!ret && ((reg_val & bit_mask) == 0)) {
+	if (!ret && ((reg_val & bit_mask) != bit_mask)) {
 		reg_val |= bit_mask;
 		ret = adp8860_write(client, reg, reg_val);
 	}

drivers/video/backlight/adp8870_bl.c

@@ -160,7 +160,7 @@ static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask
 
 	ret = adp8870_read(client, reg, &reg_val);
 
-	if (!ret && ((reg_val & bit_mask) == 0)) {
+	if (!ret && ((reg_val & bit_mask) != bit_mask)) {
 		reg_val |= bit_mask;
 		ret = adp8870_write(client, reg, reg_val);
 	}

drivers/video/backlight/l4f00242t03.c

@@ -190,6 +190,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
 	priv->io_reg = regulator_get(&spi->dev, "vdd");
 	if (IS_ERR(priv->io_reg)) {
+		ret = PTR_ERR(priv->io_reg);
 		dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
 			__func__);
 		goto err3;
@@ -197,6 +198,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
 	priv->core_reg = regulator_get(&spi->dev, "vcore");
 	if (IS_ERR(priv->core_reg)) {
+		ret = PTR_ERR(priv->core_reg);
 		dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
 			__func__);
 		goto err4;

fs/proc/task_mmu.c

@@ -518,6 +518,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 		if (!page)
 			continue;
 
+		if (PageReserved(page))
+			continue;
+
 		/* Clear accessed and referenced bits. */
 		ptep_test_and_clear_young(vma, addr, pte);
 		ClearPageReferenced(page);

include/linux/fs.h

@@ -10,6 +10,7 @@
 #include <linux/ioctl.h>
 #include <linux/blk_types.h>
 #include <linux/types.h>
+#include <linux/migrate_mode.h>
 
 /*
  * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
@@ -526,7 +527,6 @@ enum positive_aop_returns {
 struct page;
 struct address_space;
 struct writeback_control;
-enum migrate_mode;
 
 struct iov_iter {
 	const struct iovec *iov;

include/linux/kexec.h

@@ -50,9 +50,11 @@
  * note header.  For kdump, the code in vmcore.c runs in the context
  * of the second kernel to combine them into one note.
  */
+#ifndef KEXEC_NOTE_BYTES
 #define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) +		\
 			    KEXEC_CORE_NOTE_NAME_BYTES +		\
 			    KEXEC_CORE_NOTE_DESC_BYTES )
+#endif
 
 /*
  * This structure is used to hold the arguments that are used when loading

include/linux/migrate.h

@@ -3,22 +3,10 @@
 
 #include <linux/mm.h>
 #include <linux/mempolicy.h>
+#include <linux/migrate_mode.h>
 
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
-/*
- * MIGRATE_ASYNC means never block
- * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
- *	on most operations but not ->writepage as the potential stall time
- *	is too significant
- * MIGRATE_SYNC will block when migrating pages
- */
-enum migrate_mode {
-	MIGRATE_ASYNC,
-	MIGRATE_SYNC_LIGHT,
-	MIGRATE_SYNC,
-};
-
 #ifdef CONFIG_MIGRATION
 
 #define PAGE_MIGRATION 1

include/linux/migrate_mode.h (new file)

@@ -0,0 +1,16 @@
#ifndef MIGRATE_MODE_H_INCLUDED
#define MIGRATE_MODE_H_INCLUDED
/*
 * MIGRATE_ASYNC means never block
 * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
 *	on most operations but not ->writepage as the potential stall time
 *	is too significant
 * MIGRATE_SYNC will block when migrating pages
 */
enum migrate_mode {
	MIGRATE_ASYNC,
	MIGRATE_SYNC_LIGHT,
	MIGRATE_SYNC,
};

#endif /* MIGRATE_MODE_H_INCLUDED */
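Why a separate one-enum header: unlike structs, enums cannot be forward-declared in standard C, because the compiler needs the full list of enumerators to know the type's size. The bare "enum migrate_mode;" that fs.h used to carry is therefore invalid and is what produced the warnings named in the changelog; the tiny header gives every user the complete definition cheaply. A stand-alone illustration of the pattern (demo names, not kernel code):

#include <stdio.h>

/* Imagine this block is the small shared header: a complete enum
 * definition, cheap to include anywhere. */
#ifndef MIGRATE_MODE_DEMO_H
#define MIGRATE_MODE_DEMO_H
enum demo_mode { DEMO_ASYNC, DEMO_SYNC_LIGHT, DEMO_SYNC };
#endif

/* With the definition visible, functions can take the enum by value;
 * a bare "enum demo_mode;" forward declaration could not support this. */
static const char *demo_mode_name(enum demo_mode mode)
{
	switch (mode) {
	case DEMO_ASYNC:	return "async";
	case DEMO_SYNC_LIGHT:	return "sync-light";
	case DEMO_SYNC:		return "sync";
	}
	return "?";
}

int main(void)
{
	printf("%s\n", demo_mode_name(DEMO_SYNC_LIGHT));
	return 0;
}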

include/linux/shmem_fs.h

@@ -48,6 +48,7 @@ extern struct file *shmem_file_setup(const char *name,
 					loff_t size, unsigned long flags);
 extern int shmem_zero_setup(struct vm_area_struct *);
 extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern void shmem_unlock_mapping(struct address_space *mapping);
 extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 					pgoff_t index, gfp_t gfp_mask);
 extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);

include/linux/swap.h

@@ -273,7 +273,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 #endif
 
 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
-extern void scan_mapping_unevictable_pages(struct address_space *);
+extern void check_move_unevictable_pages(struct page **, int nr_pages);
 
 extern unsigned long scan_unevictable_pages;
 extern int scan_unevictable_handler(struct ctl_table *, int,

ipc/mqueue.c

@@ -128,7 +128,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
 
 	if (S_ISREG(mode)) {
 		struct mqueue_inode_info *info;
-		struct task_struct *p = current;
 		unsigned long mq_bytes, mq_msg_tblsz;
 
 		inode->i_fop = &mqueue_file_operations;
@@ -159,7 +158,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
 		spin_lock(&mq_lock);
 		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
-		    u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
+		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
 			spin_unlock(&mq_lock);
 			/* mqueue_evict_inode() releases info->messages */
 			ret = -EMFILE;

ipc/shm.c

@@ -870,9 +870,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 	case SHM_LOCK:
 	case SHM_UNLOCK:
 	{
-		struct file *uninitialized_var(shm_file);
-
-		lru_add_drain_all();  /* drain pagevecs to lru lists */
+		struct file *shm_file;
 
 		shp = shm_lock_check(ns, shmid);
 		if (IS_ERR(shp)) {
@@ -896,21 +894,30 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 		if (err)
 			goto out_unlock;
 
-		if(cmd==SHM_LOCK) {
+		shm_file = shp->shm_file;
+		if (is_file_hugepages(shm_file))
+			goto out_unlock;
+
+		if (cmd == SHM_LOCK) {
 			struct user_struct *user = current_user();
-			if (!is_file_hugepages(shp->shm_file)) {
-				err = shmem_lock(shp->shm_file, 1, user);
-				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
-					shp->shm_perm.mode |= SHM_LOCKED;
-					shp->mlock_user = user;
-				}
-			}
-		} else if (!is_file_hugepages(shp->shm_file)) {
-			shmem_lock(shp->shm_file, 0, shp->mlock_user);
-			shp->shm_perm.mode &= ~SHM_LOCKED;
-			shp->mlock_user = NULL;
+			err = shmem_lock(shm_file, 1, user);
+			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
+				shp->shm_perm.mode |= SHM_LOCKED;
+				shp->mlock_user = user;
+			}
+			goto out_unlock;
 		}
+
+		/* SHM_UNLOCK */
+		if (!(shp->shm_perm.mode & SHM_LOCKED))
+			goto out_unlock;
+		shmem_lock(shm_file, 0, shp->mlock_user);
+		shp->shm_perm.mode &= ~SHM_LOCKED;
+		shp->mlock_user = NULL;
+		get_file(shm_file);
+		shm_unlock(shp);
+		shmem_unlock_mapping(shm_file->f_mapping);
+		fput(shm_file);
 		goto out;
 	}
 	case IPC_RMID:
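This restructuring is what shortens the unpreemptible section: shmem_unlock_mapping() can walk a large mapping, so the code now pins the file with a reference (get_file), drops the IPC spinlock (shm_unlock), does the slow scan, and only then releases the reference (fput). A toy user-space sketch of that shape, with hypothetical types standing in for the kernel objects:

#include <pthread.h>
#include <stdio.h>

struct pinned {
	pthread_mutex_t lock;
	int refcount;
};

static void slow_scan(struct pinned *p)
{
	/* stands in for shmem_unlock_mapping(): may touch many pages and
	 * reschedule, so it must not run under the lock */
	(void)p;
}

static void unlock_op(struct pinned *p)
{
	pthread_mutex_lock(&p->lock);
	p->refcount++;			/* get_file(shm_file) */
	pthread_mutex_unlock(&p->lock);	/* shm_unlock(shp)    */

	slow_scan(p);			/* now preemptible    */

	pthread_mutex_lock(&p->lock);
	p->refcount--;			/* fput(shm_file)     */
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pinned p = { PTHREAD_MUTEX_INITIALIZER, 1 };

	unlock_op(&p);
	printf("refcount back to %d\n", p.refcount);
	return 0;
}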

kernel/kprobes.c

@@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 		/* Early boot.  kretprobe_table_locks not yet initialized. */
 		return;
 
+	INIT_HLIST_HEAD(&empty_rp);
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
 	kretprobe_table_lock(hash, &flags);
@@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 			recycle_rp_inst(ri, &empty_rp);
 	}
 	kretprobe_table_unlock(hash, &flags);
-	INIT_HLIST_HEAD(&empty_rp);
 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);

mm/hugetlb.c

@@ -2508,6 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct hstate *h = hstate_vma(vma);
 	int ret = VM_FAULT_SIGBUS;
+	int anon_rmap = 0;
 	pgoff_t idx;
 	unsigned long size;
 	struct page *page;
@@ -2562,14 +2563,13 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			spin_lock(&inode->i_lock);
 			inode->i_blocks += blocks_per_huge_page(h);
 			spin_unlock(&inode->i_lock);
-			page_dup_rmap(page);
 		} else {
 			lock_page(page);
 			if (unlikely(anon_vma_prepare(vma))) {
 				ret = VM_FAULT_OOM;
 				goto backout_unlocked;
 			}
-			hugepage_add_new_anon_rmap(page, vma, address);
+			anon_rmap = 1;
 		}
 	} else {
 		/*
@@ -2582,7 +2582,6 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			VM_FAULT_SET_HINDEX(h - hstates);
 			goto backout_unlocked;
 		}
-		page_dup_rmap(page);
 	}
 
 	/*
@@ -2606,6 +2605,10 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
+	if (anon_rmap)
+		hugepage_add_new_anon_rmap(page, vma, address);
+	else
+		page_dup_rmap(page);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);

mm/memcontrol.c

@@ -3247,7 +3247,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
 	return ret;
 }

mm/memory.c

@@ -878,17 +878,26 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		}
 		if (likely(!non_swap_entry(entry)))
 			rss[MM_SWAPENTS]++;
-		else if (is_write_migration_entry(entry) &&
+		else if (is_migration_entry(entry)) {
+			page = migration_entry_to_page(entry);
+
+			if (PageAnon(page))
+				rss[MM_ANONPAGES]++;
+			else
+				rss[MM_FILEPAGES]++;
+
+			if (is_write_migration_entry(entry) &&
 				is_cow_mapping(vm_flags)) {
-			/*
-			 * COW mappings require pages in both parent
-			 * and child to be set to read.
-			 */
-			make_migration_entry_read(&entry);
-			pte = swp_entry_to_pte(entry);
-			set_pte_at(src_mm, addr, src_pte, pte);
+				/*
+				 * COW mappings require pages in both
+				 * parent and child to be set to read.
+				 */
+				make_migration_entry_read(&entry);
+				pte = swp_entry_to_pte(entry);
+				set_pte_at(src_mm, addr, src_pte, pte);
+			}
 		}
 	}
 	goto out_set_pte;
 }
@@ -1191,6 +1200,16 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			if (!non_swap_entry(entry))
 				rss[MM_SWAPENTS]--;
+			else if (is_migration_entry(entry)) {
+				struct page *page;
+
+				page = migration_entry_to_page(entry);
+				if (PageAnon(page))
+					rss[MM_ANONPAGES]--;
+				else
+					rss[MM_FILEPAGES]--;
+			}
 			if (unlikely(!free_swap_and_cache(entry)))
 				print_bad_pte(vma, addr, ptent, NULL);
 		}

mm/page_alloc.c

@@ -5413,7 +5413,25 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
 bool is_pageblock_removable_nolock(struct page *page)
 {
-	struct zone *zone = page_zone(page);
+	struct zone *zone;
+	unsigned long pfn;
+
+	/*
+	 * We have to be careful here because we are iterating over memory
+	 * sections which are not zone aware so we might end up outside of
+	 * the zone but still within the section.
+	 * We have to take care about the node as well. If the node is offline
+	 * its NODE_DATA will be NULL - see page_zone.
+	 */
+	if (!node_online(page_to_nid(page)))
+		return false;
+
+	zone = page_zone(page);
+	pfn = page_to_pfn(page);
+	if (zone->zone_start_pfn > pfn ||
+			zone->zone_start_pfn + zone->spanned_pages <= pfn)
+		return false;
+
 	return __count_immobile_pages(zone, page, 0);
 }

mm/shmem.c

@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
 /*
  * Pagevec may contain swap entries, so shuffle up pages before releasing.
  */
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
 {
 	int i, j;
 
@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
 			pvec->pages[j++] = page;
 	}
 	pvec->nr = j;
-	pagevec_release(pvec);
 }
+
+/*
+ * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+	struct pagevec pvec;
+	pgoff_t indices[PAGEVEC_SIZE];
+	pgoff_t index = 0;
+
+	pagevec_init(&pvec, 0);
+	/*
+	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+	 */
+	while (!mapping_unevictable(mapping)) {
+		/*
+		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
+		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+		 */
+		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+					PAGEVEC_SIZE, pvec.pages, indices);
+		if (!pvec.nr)
+			break;
+		index = indices[pvec.nr - 1] + 1;
+		shmem_deswap_pagevec(&pvec);
+		check_move_unevictable_pages(pvec.pages, pvec.nr);
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+}
 
 /*
@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			}
 			unlock_page(page);
 		}
-		shmem_pagevec_release(&pvec);
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
 		index++;
@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			continue;
 		}
 		if (index == start && indices[0] > end) {
-			shmem_pagevec_release(&pvec);
+			shmem_deswap_pagevec(&pvec);
+			pagevec_release(&pvec);
 			break;
 		}
 		mem_cgroup_uncharge_start();
@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			}
 			unlock_page(page);
 		}
-		shmem_pagevec_release(&pvec);
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		index++;
 	}
@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 		user_shm_unlock(inode->i_size, user);
 		info->flags &= ~VM_LOCKED;
 		mapping_clear_unevictable(file->f_mapping);
-		/*
-		 * Ensure that a racing putback_lru_page() can see
-		 * the pages of this mapping are evictable when we
-		 * skip them due to !PageLRU during the scan.
-		 */
-		smp_mb__after_clear_bit();
-		scan_mapping_unevictable_pages(file->f_mapping);
 	}
 	retval = 0;
@@ -2445,6 +2470,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	return 0;
 }
 
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);

mm/vmscan.c

@@ -26,7 +26,6 @@
 #include <linux/buffer_head.h>	/* for try_to_release_page(),
 					buffer_heads_over_limit */
 #include <linux/mm_inline.h>
-#include <linux/pagevec.h>
 #include <linux/backing-dev.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -661,7 +660,7 @@ void putback_lru_page(struct page *page)
 	 * When racing with an mlock or AS_UNEVICTABLE clearing
 	 * (page is unlocked) make sure that if the other thread
 	 * does not observe our setting of PG_lru and fails
-	 * isolation/check_move_unevictable_page,
+	 * isolation/check_move_unevictable_pages,
 	 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
 	 * the page back to the evictable list.
 	 *
@@ -3499,82 +3498,30 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
+#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
- * @page: page to check evictability and move to appropriate lru list
- * @zone: zone page is in
+ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
+ * @pages:	array of pages to check
+ * @nr_pages:	number of pages to check
  *
- * Checks a page for evictability and moves the page to the appropriate
- * zone lru list.
+ * Checks pages for evictability and moves them to the appropriate lru list.
  *
- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
- * have PageUnevictable set.
+ * This function is only used for SysV IPC SHM_UNLOCK.
  */
-static void check_move_unevictable_page(struct page *page, struct zone *zone)
+void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
 	struct lruvec *lruvec;
-
-	VM_BUG_ON(PageActive(page));
-retry:
-	ClearPageUnevictable(page);
-	if (page_evictable(page, NULL)) {
-		enum lru_list l = page_lru_base_type(page);
-
-		__dec_zone_state(zone, NR_UNEVICTABLE);
-		lruvec = mem_cgroup_lru_move_lists(zone, page,
-						   LRU_UNEVICTABLE, l);
-		list_move(&page->lru, &lruvec->lists[l]);
-		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
-		__count_vm_event(UNEVICTABLE_PGRESCUED);
-	} else {
-		/*
-		 * rotate unevictable list
-		 */
-		SetPageUnevictable(page);
-		lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
-						   LRU_UNEVICTABLE);
-		list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
-		if (page_evictable(page, NULL))
-			goto retry;
-	}
-}
-
-/**
- * scan_mapping_unevictable_pages - scan an address space for evictable pages
- * @mapping: struct address_space to scan for evictable pages
- *
- * Scan all pages in mapping.  Check unevictable pages for
- * evictability and move them to the appropriate zone lru list.
- */
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-	pgoff_t next = 0;
-	pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT;
-	struct zone *zone;
-	struct pagevec pvec;
-
-	if (mapping->nrpages == 0)
-		return;
-
-	pagevec_init(&pvec, 0);
-	while (next < end &&
-		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-		int i;
-		int pg_scanned = 0;
-
-		zone = NULL;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			pgoff_t page_index = page->index;
-			struct zone *pagezone = page_zone(page);
-
-			pg_scanned++;
-			if (page_index > next)
-				next = page_index;
-			next++;
+	struct zone *zone = NULL;
+	int pgscanned = 0;
+	int pgrescued = 0;
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		struct page *page = pages[i];
+		struct zone *pagezone;
 
+		pgscanned++;
+		pagezone = page_zone(page);
 		if (pagezone != zone) {
 			if (zone)
 				spin_unlock_irq(&zone->lru_lock);
@@ -3582,17 +3529,30 @@ void scan_mapping_unevictable_pages(struct address_space *mapping)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
 
-		if (PageLRU(page) && PageUnevictable(page))
-			check_move_unevictable_page(page, zone);
-		if (zone)
-			spin_unlock_irq(&zone->lru_lock);
-		pagevec_release(&pvec);
+		if (!PageLRU(page) || !PageUnevictable(page))
+			continue;
 
-		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+		if (page_evictable(page, NULL)) {
+			enum lru_list lru = page_lru_base_type(page);
+
+			VM_BUG_ON(PageActive(page));
+			ClearPageUnevictable(page);
+			__dec_zone_state(zone, NR_UNEVICTABLE);
+			lruvec = mem_cgroup_lru_move_lists(zone, page,
+						LRU_UNEVICTABLE, lru);
+			list_move(&page->lru, &lruvec->lists[lru]);
+			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+			pgrescued++;
+		}
 	}
+
+	if (zone) {
+		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
+		spin_unlock_irq(&zone->lru_lock);
+	}
 }
+#endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
 {