mm: remove the pgprot argument to __vmalloc
The pgprot argument to __vmalloc is always PAGE_KERNEL now, so remove it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Michael Kelley <mikelley@microsoft.com> [hyperv]
Acked-by: Gao Xiang <xiang@kernel.org> [erofs]
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Wei Liu <wei.liu@kernel.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200414131348.444715-22-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 88dca4ca5a
parent d28ff991b2
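For callers the conversion is mechanical: the trailing PAGE_KERNEL argument simply disappears, since __vmalloc() now applies PAGE_KERNEL internally (see the __vmalloc() implementation hunks below). A minimal before/after sketch; example_alloc() is a hypothetical caller, not part of this patch:

#include <linux/vmalloc.h>	/* __vmalloc() */
#include <linux/gfp.h>		/* GFP_KERNEL, __GFP_ZERO */

static void *example_alloc(unsigned long size)
{
	/*
	 * Before this patch:
	 *	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	 * After it, the pgprot argument is gone:
	 */
	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
}

A caller that genuinely needs a non-default page protection has to use __vmalloc_node_range() instead, which keeps its pgprot_t parameter (visible in the vmalloc.h hunk below).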
@@ -97,8 +97,7 @@ static int hv_cpu_init(unsigned int cpu)
 	 * not be stopped in the case of CPU offlining and the VM will hang.
 	 */
 	if (!*hvp) {
-		*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO,
-				 PAGE_KERNEL);
+		*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
 	}
 
 	if (*hvp) {
@@ -1279,8 +1279,7 @@ extern struct kmem_cache *x86_fpu_cache;
 #define __KVM_HAVE_ARCH_VM_ALLOC
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
-	return __vmalloc(kvm_x86_ops.vm_size,
-			 GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
+	return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 }
 void kvm_arch_free_vm(struct kvm *kvm);
 
@@ -336,8 +336,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 	/* Avoid using vmalloc for smaller buffers. */
 	size = npages * sizeof(struct page *);
 	if (size > PAGE_SIZE)
-		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
-				  PAGE_KERNEL);
+		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 	else
 		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
 
@@ -396,9 +396,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 	bytes = sizeof(struct page *)*want;
 	new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
 	if (!new_pages) {
-		new_pages = __vmalloc(bytes,
-				GFP_NOIO | __GFP_ZERO,
-				PAGE_KERNEL);
+		new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO);
 		if (!new_pages)
 			return NULL;
 	}
@@ -154,8 +154,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 	file_size += sizeof(*iter.hdr) * n_obj;
 
 	/* Allocate the file in vmalloc memory, it's likely to be big */
-	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
-			       PAGE_KERNEL);
+	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
+			__GFP_NORETRY);
 	if (!iter.start) {
 		mutex_unlock(&gpu->mmu_context->lock);
 		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
@@ -145,9 +145,8 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
 	int ret = 0;
 
 	map_size = pblk_trans_map_size(pblk);
-	pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN
-					| __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM,
-					PAGE_KERNEL);
+	pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN |
+				    __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM);
 	if (!pblk->trans_map) {
 		pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
 				map_size);
@@ -400,13 +400,13 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 	 */
 	if (gfp_mask & __GFP_NORETRY) {
 		unsigned noio_flag = memalloc_noio_save();
-		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+		void *ptr = __vmalloc(c->block_size, gfp_mask);
 
 		memalloc_noio_restore(noio_flag);
 		return ptr;
 	}
 
-	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+	return __vmalloc(c->block_size, gfp_mask);
 }
 
 /*
@@ -1297,7 +1297,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
 	if (!ubi_dbg_chk_io(ubi))
 		return 0;
 
-	buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+	buf1 = __vmalloc(len, GFP_NOFS);
 	if (!buf1) {
 		ubi_err(ubi, "cannot allocate memory to check writes");
 		return 0;
@@ -1361,7 +1361,7 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
 	if (!ubi_dbg_chk_io(ubi))
 		return 0;
 
-	buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+	buf = __vmalloc(len, GFP_NOFS);
 	if (!buf) {
 		ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
 		return 0;
@@ -136,8 +136,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
 
 	while (bufsize >= SECTOR_SIZE) {
 		buf = __vmalloc(bufsize,
-				GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY,
-				PAGE_KERNEL);
+				GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
 		if (buf) {
 			*buflen = bufsize;
 			return buf;
@@ -354,7 +354,7 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
 
 	hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN);
 	if (hc == NULL)
-		hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL);
+		hc = __vmalloc(hsize, GFP_NOFS);
 
 	if (hc == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1166,7 +1166,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
 
 	hc2 = kmalloc_array(hsize_bytes, 2, GFP_NOFS | __GFP_NOWARN);
 	if (hc2 == NULL)
-		hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);
+		hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS);
 
 	if (!hc2)
 		return -ENOMEM;
@@ -1327,7 +1327,7 @@ static void *gfs2_alloc_sort_buffer(unsigned size)
 	if (size < KMALLOC_MAX_SIZE)
 		ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
 	if (!ptr)
-		ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
+		ptr = __vmalloc(size, GFP_NOFS);
 	return ptr;
 }
 
@@ -1987,8 +1987,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
 
 	ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
 	if (ht == NULL)
-		ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO,
-			       PAGE_KERNEL);
+		ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO);
 	if (!ht)
 		return -ENOMEM;
 
@@ -1365,7 +1365,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
 	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
 	if (sdp->sd_quota_bitmap == NULL)
 		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
-						 __GFP_ZERO, PAGE_KERNEL);
+						 __GFP_ZERO);
 	if (!sdp->sd_quota_bitmap)
 		return error;
 
@@ -582,7 +582,7 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
 	if (!arg->layoutupdate_pages)
 		return -ENOMEM;
 
-	start_p = __vmalloc(buffer_size, GFP_NOFS, PAGE_KERNEL);
+	start_p = __vmalloc(buffer_size, GFP_NOFS);
 	if (!start_p) {
 		kfree(arg->layoutupdate_pages);
 		return -ENOMEM;
@@ -34,7 +34,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)
 		/* return (void *)__get_free_page(gfp_mask); */
 	}
 	if (likely((size >> PAGE_SHIFT) < totalram_pages()))
-		return __vmalloc(size, gfp_mask, PAGE_KERNEL);
+		return __vmalloc(size, gfp_mask);
 	return NULL;
 }
 
@@ -815,7 +815,7 @@ void ubifs_dump_leb(const struct ubifs_info *c, int lnum)
 
 	pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
 
-	buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+	buf = __vmalloc(c->leb_size, GFP_NOFS);
 	if (!buf) {
 		ubifs_err(c, "cannot allocate memory for dumping LEB %d", lnum);
 		return;
@@ -1095,7 +1095,7 @@ static int scan_check_cb(struct ubifs_info *c,
 		return LPT_SCAN_CONTINUE;
 	}
 
-	buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+	buf = __vmalloc(c->leb_size, GFP_NOFS);
 	if (!buf)
 		return -ENOMEM;
 
@@ -1596,7 +1596,7 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
 	if (!dbg_is_chk_lprops(c))
 		return 0;
 
-	buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+	buf = p = __vmalloc(c->leb_size, GFP_NOFS);
 	if (!buf) {
 		ubifs_err(c, "cannot allocate memory for ltab checking");
 		return 0;
@@ -1845,7 +1845,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
 	void *buf, *p;
 
 	pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
-	buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+	buf = p = __vmalloc(c->leb_size, GFP_NOFS);
 	if (!buf) {
 		ubifs_err(c, "cannot allocate memory to dump LPT");
 		return;
@@ -977,7 +977,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci)
 	if (c->no_orphs)
 		return 0;
 
-	buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+	buf = __vmalloc(c->leb_size, GFP_NOFS);
 	if (!buf) {
 		ubifs_err(c, "cannot allocate memory to check orphans");
 		return 0;
@@ -48,7 +48,7 @@ __kmem_vmalloc(size_t size, xfs_km_flags_t flags)
 	if (flags & KM_NOFS)
 		nofs_flag = memalloc_nofs_save();
 
-	ptr = __vmalloc(size, lflags, PAGE_KERNEL);
+	ptr = __vmalloc(size, lflags);
 
 	if (flags & KM_NOFS)
 		memalloc_nofs_restore(nofs_flag);
@@ -110,7 +110,7 @@ extern void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, unsigned long vm_flags, int node,
@@ -82,7 +82,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 	struct bpf_prog *fp;
 
 	size = round_up(size, PAGE_SIZE);
-	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+	fp = __vmalloc(size, gfp_flags);
 	if (fp == NULL)
 		return NULL;
 
@@ -232,7 +232,7 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 	if (ret)
 		return NULL;
 
-	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+	fp = __vmalloc(size, gfp_flags);
 	if (fp == NULL) {
 		__bpf_prog_uncharge(fp_old->aux->user, delta);
 	} else {
@@ -1089,7 +1089,7 @@ static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 	struct bpf_prog *fp;
 
-	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
+	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
 	if (fp != NULL) {
 		/* aux->prog still points to the fp_other one, so
 		 * when promoting the clone to the real program,
@@ -20,7 +20,7 @@ struct group_info *groups_alloc(int gidsetsize)
 	len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize;
 	gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY);
 	if (!gi)
-		gi = __vmalloc(len, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
+		gi = __vmalloc(len, GFP_KERNEL_ACCOUNT);
 	if (!gi)
 		return NULL;
 
@@ -2946,8 +2946,7 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
 		return err;
 
 	/* Suck in entire file: we'll want most of it. */
-	info->hdr = __vmalloc(info->len,
-			      GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
+	info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
 	if (!info->hdr)
 		return -ENOMEM;
 
mm/nommu.c (15 changed lines)
@@ -140,7 +140,7 @@ void vfree(const void *addr)
 }
 EXPORT_SYMBOL(vfree);
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask)
 {
 	/*
 	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
@@ -152,14 +152,14 @@ EXPORT_SYMBOL(__vmalloc);
 
 void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
 {
-	return __vmalloc(size, flags, PAGE_KERNEL);
+	return __vmalloc(size, flags);
 }
 
 static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
 {
 	void *ret;
 
-	ret = __vmalloc(size, flags, PAGE_KERNEL);
+	ret = __vmalloc(size, flags);
 	if (ret) {
 		struct vm_area_struct *vma;
 
@@ -230,7 +230,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
 }
 EXPORT_SYMBOL(vmalloc);
 
@@ -248,8 +248,7 @@ EXPORT_SYMBOL(vmalloc);
  */
 void *vzalloc(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-			PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vzalloc);
 
@@ -302,7 +301,7 @@ EXPORT_SYMBOL(vzalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
 }
 
 /**
@@ -314,7 +313,7 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc_32);
 
@@ -8244,7 +8244,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 		table = memblock_alloc_raw(size,
 					   SMP_CACHE_BYTES);
 	} else if (get_order(size) >= MAX_ORDER || hashdist) {
-		table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+		table = __vmalloc(size, gfp_flags);
 		virt = true;
 	} else {
 		/*
@@ -482,7 +482,7 @@ static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, gfp);
 	else
-		return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
+		return __vmalloc(size, gfp | __GFP_ZERO);
 }
 
 /**
@@ -2564,9 +2564,9 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_mask, prot, 0, node, caller);
 }
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask)
 {
-	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
+	return __vmalloc_node(size, 1, gfp_mask, PAGE_KERNEL, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -1095,16 +1095,14 @@ static int do_replace(struct net *net, const void __user *user,
 	tmp.name[sizeof(tmp.name) - 1] = 0;
 
 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
-	newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
-			    PAGE_KERNEL);
+	newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT);
 	if (!newinfo)
 		return -ENOMEM;
 
 	if (countersize)
 		memset(newinfo->counters, 0, countersize);
 
-	newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
-				     PAGE_KERNEL);
+	newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT);
 	if (!newinfo->entries) {
 		ret = -ENOMEM;
 		goto free_newinfo;
@@ -143,7 +143,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 		break;
 	case SNDRV_DMA_TYPE_VMALLOC:
 		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
-		dmab->area = __vmalloc(size, gfp, PAGE_KERNEL);
+		dmab->area = __vmalloc(size, gfp);
 		dmab->addr = 0;
 		break;
 #ifdef CONFIG_HAS_DMA
@@ -460,7 +460,7 @@ int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
 			return 0; /* already large enough */
 		vfree(runtime->dma_area);
 	}
-	runtime->dma_area = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+	runtime->dma_area = __vmalloc(size, gfp_flags);
 	if (!runtime->dma_area)
 		return -ENOMEM;
 	runtime->dma_bytes = size;