Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "8 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  fs/exec.c: account for argv/envp pointers
  ocfs2: fix deadlock caused by recursive locking in xattr
  slub: make sysfs file removal asynchronous
  lib/cmdline.c: fix get_options() overflow while parsing ranges
  fs/dax.c: fix inefficiency in dax_writeback_mapping_range()
  autofs: sanity check status reported with AUTOFS_DEV_IOCTL_FAIL
  mm/vmalloc.c: huge-vmap: fail gracefully on unexpected huge vmap mappings
  mm, thp: remove cond_resched from __collapse_huge_page_copy
commit 337c6ba2d8
fs/autofs4/dev-ioctl.c

@@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
 	int status;
 
 	token = (autofs_wqt_t) param->fail.token;
-	status = param->fail.status ? param->fail.status : -ENOENT;
+	status = param->fail.status < 0 ? param->fail.status : -ENOENT;
 	return autofs4_wait_release(sbi, token, status);
 }
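The one-character change is a sanity check on a userspace-controlled value: param->fail.status arrives from the automount daemon, and the old test forwarded any non-zero value, including positive ones, to autofs4_wait_release(). A positive "status" then travels back through the automount path, where error handling expects a negative errno, and can be misinterpreted (ERR_PTR() on a positive value is not a valid error pointer). A minimal sketch of the rule being enforced, assuming kernel context; sanitize_user_status() is a hypothetical helper, and the MAX_ERRNO bound is an extra check beyond what the patch itself adds:

    #include <linux/err.h>      /* MAX_ERRNO */
    #include <linux/errno.h>

    /* Hypothetical helper: trust a userspace-supplied status only if it
     * already looks like a negative errno; substitute -ENOENT otherwise. */
    static int sanitize_user_status(int status)
    {
            if (status < 0 && status >= -MAX_ERRNO)
                    return status;
            return -ENOENT;
    }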
fs/dax.c
@@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 			if (ret < 0)
 				goto out;
 		}
+		start_index = indices[pvec.nr - 1] + 1;
 	}
 out:
 	put_dax(dax_dev);
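The single added line is the whole fix: the loop fetches tagged entries in pagevec-sized batches, but start_index was never advanced, so every batch re-walked the radix tree from the original starting point and the writeback scan degraded to roughly O(n^2) in the number of dirty entries. A hedged userspace rendition of the cursor-advance idiom; batch_lookup() and flush_entry() are hypothetical stand-ins for find_get_entries_tag() and the flush step:

    #include <stddef.h>

    #define BATCH 16

    /* Hypothetical stand-ins for the kernel helpers. */
    size_t batch_lookup(size_t from, size_t *indices, size_t max);
    void flush_entry(size_t index);

    void writeback_range(size_t start)
    {
            size_t indices[BATCH];
            size_t nr;

            while ((nr = batch_lookup(start, indices, BATCH)) > 0) {
                    for (size_t i = 0; i < nr; i++)
                            flush_entry(indices[i]);
                    /* The fix: resume after the last entry handled instead
                     * of rescanning from the original start every pass. */
                    start = indices[nr - 1] + 1;
            }
    }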
fs/exec.c
@@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 	if (write) {
 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+		unsigned long ptr_size;
 		struct rlimit *rlim;
 
+		/*
+		 * Since the stack will hold pointers to the strings, we
+		 * must account for them as well.
+		 *
+		 * The size calculation is the entire vma while each arg page is
+		 * built, so each time we get here it's calculating how far it
+		 * is currently (rather than each call being just the newly
+		 * added size from the arg page).  As a result, we need to
+		 * always add the entire size of the pointers, so that on the
+		 * last call to get_arg_page() we'll actually have the entire
+		 * correct size.
+		 */
+		ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+		if (ptr_size > ULONG_MAX - size)
+			goto fail;
+		size += ptr_size;
+
 		acct_arg_size(bprm, size / PAGE_SIZE);
 
 		/*

@@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 		 * to work from.
 		 */
 		rlim = current->signal->rlim;
-		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
-			put_page(page);
-			return NULL;
-		}
+		if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+			goto fail;
 	}
 
 	return page;
+
+fail:
+	put_page(page);
+	return NULL;
 }
 
 static void put_arg_page(struct page *page)
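Two things are going on here. First, the accounting now includes the argv/envp pointer array itself: each string costs at least one byte of stack, but its pointer costs sizeof(void *) more, so a caller passing a huge number of near-empty strings could previously pass the RLIMIT_STACK/4 check while the pointers alone later exhausted the stack. Second, the test ptr_size > ULONG_MAX - size is the wrap-safe way to ask whether size + ptr_size would overflow, and the duplicated put_page()/return NULL cleanup is folded into one fail: label. A self-contained rendition of the overflow idiom:

    #include <limits.h>
    #include <stdbool.h>

    /* Wrap-safe overflow test: "a + b overflows" cannot be checked as
     * "a + b > ULONG_MAX" because the addition itself wraps, so compare
     * one operand against the headroom left above the other. */
    static bool add_would_overflow(unsigned long a, unsigned long b)
    {
            return b > ULONG_MAX - a;
    }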
fs/ocfs2/dlmglue.c

@@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
 	struct ocfs2_lock_res *lockres;
 
 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
+	/* had_lock means that the current process already takes the cluster
+	 * lock previously.  If had_lock is 1, we have nothing to do here, and
+	 * it will get unlocked where we got the lock.
+	 */
 	if (!had_lock) {
 		ocfs2_remove_holder(lockres, oh);
 		ocfs2_inode_unlock(inode, ex);

fs/ocfs2/xattr.c

@@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode,
 			   void *buffer,
 			   size_t buffer_size)
 {
-	int ret;
+	int ret, had_lock;
 	struct buffer_head *di_bh = NULL;
+	struct ocfs2_lock_holder oh;
 
-	ret = ocfs2_inode_lock(inode, &di_bh, 0);
-	if (ret < 0) {
-		mlog_errno(ret);
-		return ret;
+	had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
+	if (had_lock < 0) {
+		mlog_errno(had_lock);
+		return had_lock;
 	}
 	down_read(&OCFS2_I(inode)->ip_xattr_sem);
 	ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
 				     name, buffer, buffer_size);
 	up_read(&OCFS2_I(inode)->ip_xattr_sem);
 
-	ocfs2_inode_unlock(inode, 0);
+	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
 
 	brelse(di_bh);

@@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode,
 {
 	struct buffer_head *di_bh = NULL;
 	struct ocfs2_dinode *di;
-	int ret, credits, ref_meta = 0, ref_credits = 0;
+	int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct inode *tl_inode = osb->osb_tl_inode;
 	struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
 	struct ocfs2_refcount_tree *ref_tree = NULL;
+	struct ocfs2_lock_holder oh;
 
 	struct ocfs2_xattr_info xi = {
 		.xi_name_index = name_index,

@@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode,
 		return -ENOMEM;
 	}
 
-	ret = ocfs2_inode_lock(inode, &di_bh, 1);
-	if (ret < 0) {
+	had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
+	if (had_lock < 0) {
+		ret = had_lock;
 		mlog_errno(ret);
 		goto cleanup_nolock;
 	}

@@ -3670,7 +3673,7 @@ int ocfs2_xattr_set(struct inode *inode,
 		if (ret)
 			mlog_errno(ret);
 	}
-	ocfs2_inode_unlock(inode, 1);
+	ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
 cleanup_nolock:
 	brelse(di_bh);
 	brelse(xbs.xattr_bh);
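The deadlock being fixed is recursive cluster locking: ocfs2_xattr_get()/ocfs2_xattr_set() can run while the same process already holds the inode's cluster lock, and if another node has requested that lock in the meantime, a plain second ocfs2_inode_lock() blocks forever. The tracker variants record the owning process, turn a nested acquire into a no-op that reports had_lock = 1, and let the matching unlock skip the real release. A structural sketch of the pattern; the types and helpers below are illustrative, not the ocfs2 API:

    /* Hypothetical re-entrant lock tracker. */
    struct res;
    struct holder;

    int holder_is_current(struct res *r);
    void real_lock(struct res *r);
    void real_unlock(struct res *r);
    void add_holder(struct res *r, struct holder *h);
    void remove_holder(struct res *r, struct holder *h);

    int lock_tracker(struct res *r, struct holder *h)
    {
            if (holder_is_current(r))   /* nested acquire by the owner */
                    return 1;           /* had_lock: caller must not unlock */
            real_lock(r);               /* may block on other cluster nodes */
            add_holder(r, h);           /* record ownership for nesting */
            return 0;
    }

    void unlock_tracker(struct res *r, struct holder *h, int had_lock)
    {
            if (had_lock)
                    return;             /* the outermost holder unlocks */
            remove_holder(r, h);
            real_unlock(r);
    }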
include/linux/slub_def.h

@@ -84,6 +84,7 @@ struct kmem_cache {
 	int red_left_pad;	/* Left redzone padding size */
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
+	struct work_struct kobj_remove_work;
 #endif
 #ifdef CONFIG_MEMCG
 	struct memcg_cache_params memcg_params;
lib/cmdline.c

@@ -23,14 +23,14 @@
  *	the values[M, M+1, ..., N] into the ints array in get_options.
  */
 
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
 {
 	int x, inc_counter, upper_range;
 
 	(*str)++;
 	upper_range = simple_strtol((*str), NULL, 0);
 	inc_counter = upper_range - *pint;
-	for (x = *pint; x < upper_range; x++)
+	for (x = *pint; n && x < upper_range; x++, n--)
 		*pint++ = x;
 	return inc_counter;
 }

@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
 			break;
 		if (res == 3) {
 			int range_nums;
-			range_nums = get_range((char **)&str, ints + i);
+			range_nums = get_range((char **)&str, ints + i, nints - i);
 			if (range_nums < 0)
 				break;
 			/*
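Before the extra parameter, get_range() expanded a range like "1-100" with no regard for the space remaining in the caller's array: get_options() sizes ints for nints entries (ints[0] holds the count), yet the loop kept writing until upper_range. Passing nints - i caps the expansion at the remaining capacity. A minimal illustration of the formerly overflowing call, assuming kernel context:

    int ints[4];    /* room for the count plus three values */

    /* Old behaviour: expands ~100 values through get_range() and writes
     * far past ints[3].  Fixed behaviour: stores at most the remaining
     * nints - i values and drops the rest of the range. */
    get_options("1-100", 4, ints);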
mm/khugepaged.c

@@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 			spin_unlock(ptl);
 			free_page_and_swap_cache(src_page);
 		}
-		cond_resched();
 	}
 }
 
mm/slub.c
@@ -5625,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s)
 	return name;
 }
 
+static void sysfs_slab_remove_workfn(struct work_struct *work)
+{
+	struct kmem_cache *s =
+		container_of(work, struct kmem_cache, kobj_remove_work);
+
+	if (!s->kobj.state_in_sysfs)
+		/*
+		 * For a memcg cache, this may be called during
+		 * deactivation and again on shutdown.  Remove only once.
+		 * A cache is never shut down before deactivation is
+		 * complete, so no need to worry about synchronization.
+		 */
+		return;
+
+#ifdef CONFIG_MEMCG
+	kset_unregister(s->memcg_kset);
+#endif
+	kobject_uevent(&s->kobj, KOBJ_REMOVE);
+	kobject_del(&s->kobj);
+	kobject_put(&s->kobj);
+}
+
 static int sysfs_slab_add(struct kmem_cache *s)
 {
 	int err;

@@ -5632,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	struct kset *kset = cache_kset(s);
 	int unmergeable = slab_unmergeable(s);
 
+	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
+
 	if (!kset) {
 		kobject_init(&s->kobj, &slab_ktype);
 		return 0;

@@ -5695,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 		 */
 		return;
 
-	if (!s->kobj.state_in_sysfs)
-		/*
-		 * For a memcg cache, this may be called during
-		 * deactivation and again on shutdown.  Remove only once.
-		 * A cache is never shut down before deactivation is
-		 * complete, so no need to worry about synchronization.
-		 */
-		return;
-
-#ifdef CONFIG_MEMCG
-	kset_unregister(s->memcg_kset);
-#endif
-	kobject_uevent(&s->kobj, KOBJ_REMOVE);
-	kobject_del(&s->kobj);
+	kobject_get(&s->kobj);
+	schedule_work(&s->kobj_remove_work);
 }
 
 void sysfs_slab_release(struct kmem_cache *s)
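The teardown moves out of the caller's locking context: sysfs_slab_remove() now only pins the kobject and schedules work, and the work item does the actual kernfs removal. The motivation is lock ordering: removing a sysfs file waits for active readers to drain, and the remover can hold slab_mutex while a reader's show() method wants slab_mutex, which deadlocks when done synchronously. Deferring to a workqueue lets the caller drop its locks first. The generic shape of the pattern, with a hypothetical struct obj standing in for kmem_cache (the work item must have been set up with INIT_WORK() at creation time, as the patch does in sysfs_slab_add()):

    #include <linux/workqueue.h>
    #include <linux/kobject.h>

    /* Hypothetical object embedding a kobject and its removal work. */
    struct obj {
            struct kobject kobj;
            struct work_struct remove_work;
    };

    static void obj_remove_workfn(struct work_struct *work)
    {
            struct obj *o = container_of(work, struct obj, remove_work);

            kobject_del(&o->kobj);  /* may block on active sysfs users */
            kobject_put(&o->kobj);  /* drop the pin taken at schedule time */
    }

    static void obj_remove_async(struct obj *o)
    {
            kobject_get(&o->kobj);  /* keep it alive until the work runs */
            schedule_work(&o->remove_work);
    }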
mm/vmalloc.c
@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	if (p4d_none(*p4d))
 		return NULL;
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+
+	/*
+	 * Don't dereference bad PUD or PMD (below) entries.  This will also
+	 * identify huge mappings, which we may encounter on architectures
+	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y.  Such regions will be
+	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
+	 * not [unambiguously] associated with a struct page, so there is
+	 * no correct value to return for them.
+	 */
+	WARN_ON_ONCE(pud_bad(*pud));
+	if (pud_none(*pud) || pud_bad(*pud))
 		return NULL;
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	WARN_ON_ONCE(pmd_bad(*pmd));
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		return NULL;
 
 	ptep = pte_offset_map(pmd, addr);
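pud_bad()/pmd_bad() flag entries that do not point at a lower-level page table, which is exactly what a huge-page leaf entry looks like at these levels, so the walk now returns NULL (after a one-time diagnostic) instead of dereferencing a huge mapping as if it were a table pointer. The caller-visible contract is simply that NULL can come back for any vmalloc-range address. A hedged caller-side sketch:

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /* Illustrative caller: after this fix, vmalloc_to_page() may return
     * NULL for an address inside a huge vmap region, so callers (e.g. in
     * a /proc/kcore-style read path) must tolerate that. */
    static struct page *lookup_vmalloc_page(const void *addr)
    {
            struct page *page = NULL;

            if (is_vmalloc_addr(addr))
                    page = vmalloc_to_page(addr);  /* NULL for huge maps */
            return page;
    }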