treewide: Use array_size() in vmalloc()
The vmalloc() function has no 2-factor argument form, so multiplication
factors need to be wrapped in array_size(). This patch replaces cases of:

	vmalloc(a * b)

with:

	vmalloc(array_size(a, b))

as well as handling cases of:

	vmalloc(a * b * c)

with:

	vmalloc(array3_size(a, b, c))

This does, however, attempt to ignore constant size factors like:

	vmalloc(4 * 1024)

though any constants defined via macros get caught up in the conversion.

Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.

The Coccinelle script used for this was:

// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@

(
  vmalloc(
-	(sizeof(TYPE)) * E
+	sizeof(TYPE) * E
  , ...)
|
  vmalloc(
-	(sizeof(THING)) * E
+	sizeof(THING) * E
  , ...)
)

// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@

(
  vmalloc(
-	sizeof(u8) * (COUNT)
+	COUNT
  , ...)
|
  vmalloc(
-	sizeof(__u8) * (COUNT)
+	COUNT
  , ...)
|
  vmalloc(
-	sizeof(char) * (COUNT)
+	COUNT
  , ...)
|
  vmalloc(
-	sizeof(unsigned char) * (COUNT)
+	COUNT
  , ...)
|
  vmalloc(
-	sizeof(u8) * COUNT
+	COUNT
  , ...)
|
  vmalloc(
-	sizeof(__u8) * COUNT
+	COUNT
  , ...)
|
  vmalloc(
-	sizeof(char) * COUNT
+	COUNT
  , ...)
|
  vmalloc(
-	sizeof(unsigned char) * COUNT
+	COUNT
  , ...)
)

// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@

(
  vmalloc(
-	sizeof(TYPE) * (COUNT_ID)
+	array_size(COUNT_ID, sizeof(TYPE))
  , ...)
|
  vmalloc(
-	sizeof(TYPE) * COUNT_ID
+	array_size(COUNT_ID, sizeof(TYPE))
  , ...)
|
  vmalloc(
-	sizeof(TYPE) * (COUNT_CONST)
+	array_size(COUNT_CONST, sizeof(TYPE))
  , ...)
|
  vmalloc(
-	sizeof(TYPE) * COUNT_CONST
+	array_size(COUNT_CONST, sizeof(TYPE))
  , ...)
|
  vmalloc(
-	sizeof(THING) * (COUNT_ID)
+	array_size(COUNT_ID, sizeof(THING))
  , ...)
|
  vmalloc(
-	sizeof(THING) * COUNT_ID
+	array_size(COUNT_ID, sizeof(THING))
  , ...)
|
  vmalloc(
-	sizeof(THING) * (COUNT_CONST)
+	array_size(COUNT_CONST, sizeof(THING))
  , ...)
|
  vmalloc(
-	sizeof(THING) * COUNT_CONST
+	array_size(COUNT_CONST, sizeof(THING))
  , ...)
)

// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@

  vmalloc(
-	SIZE * COUNT
+	array_size(COUNT, SIZE)
  , ...)

// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@

(
  vmalloc(
-	sizeof(TYPE) * (COUNT) * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  vmalloc(
-	sizeof(TYPE) * (COUNT) * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  vmalloc(
-	sizeof(TYPE) * COUNT * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  vmalloc(
-	sizeof(TYPE) * COUNT * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  vmalloc(
-	sizeof(THING) * (COUNT) * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
|
  vmalloc(
-	sizeof(THING) * (COUNT) * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
|
  vmalloc(
-	sizeof(THING) * COUNT * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
|
  vmalloc(
-	sizeof(THING) * COUNT * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
)

// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@

(
  vmalloc(
-	sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+	array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
  , ...)
|
  vmalloc(
-	sizeof(TYPE1) * sizeof(TYPE2) * (COUNT)
+	array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
  , ...)
|
  vmalloc(
-	sizeof(THING1) * sizeof(THING2) * COUNT
+	array3_size(COUNT, sizeof(THING1), sizeof(THING2))
  , ...)
|
  vmalloc(
-	sizeof(THING1) * sizeof(THING2) * (COUNT)
+	array3_size(COUNT, sizeof(THING1), sizeof(THING2))
  , ...)
|
  vmalloc(
-	sizeof(TYPE1) * sizeof(THING2) * COUNT
+	array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
  , ...)
|
  vmalloc(
-	sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+	array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
  , ...)
)

// Any remaining multi-factor products, first at least 3-factor products
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@

(
  vmalloc(C1 * C2 * C3, ...)
|
  vmalloc(
-	E1 * E2 * E3
+	array3_size(E1, E2, E3)
  , ...)
)

// And then all remaining 2 factors products when they're not all constants.
@@
expression E1, E2;
constant C1, C2;
@@

(
  vmalloc(C1 * C2, ...)
|
  vmalloc(
-	E1 * E2
+	array_size(E1, E2)
  , ...)
)

Signed-off-by: Kees Cook <keescook@chromium.org>
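Editor's note, not part of the patch: array_size() and array3_size() are the
overflow-checking helpers from include/linux/overflow.h; when the
multiplication would overflow they saturate to SIZE_MAX, so the allocation
fails instead of silently returning an undersized buffer. The sketch below is
illustrative only, assuming a made-up element type and function
(demo_rec/demo_alloc_table) to show the before/after shape of the conversion.

#include <linux/overflow.h>
#include <linux/vmalloc.h>

/* Hypothetical element type, used only for this example. */
struct demo_rec {
	unsigned long key;
	unsigned long val;
};

/*
 * Before this patch a caller would typically write:
 *	tab = vmalloc(n * sizeof(struct demo_rec));
 * which can wrap around when 'n' is large or attacker-influenced.
 */
static struct demo_rec *demo_alloc_table(size_t n)
{
	struct demo_rec *tab;

	/*
	 * After conversion: if n * sizeof(struct demo_rec) would overflow,
	 * array_size() returns SIZE_MAX and vmalloc() returns NULL.
	 */
	tab = vmalloc(array_size(n, sizeof(struct demo_rec)));
	if (!tab)
		return NULL;

	return tab;
}

A semantic patch like the one in the log is normally applied tree-wide with
Coccinelle's spatch tool; the exact invocation used for this commit is not
recorded here.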
commit 42bc47b353
parent a86854d0c5
@@ -559,7 +559,8 @@ static int __init rtas_event_scan_init(void)
 	rtas_error_log_max = rtas_get_error_log_max();
 	rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int);
 
-	rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER);
+	rtas_log_buf = vmalloc(array_size(LOG_NUMBER,
+					  rtas_error_log_buffer_max));
 	if (!rtas_log_buf) {
 		printk(KERN_ERR "rtasd: no memory\n");
 		return -ENOMEM;

@@ -108,7 +108,7 @@ int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
 	npte = 1ul << (order - 4);
 
 	/* Allocate reverse map array */
-	rev = vmalloc(sizeof(struct revmap_entry) * npte);
+	rev = vmalloc(array_size(npte, sizeof(struct revmap_entry)));
 	if (!rev) {
 		if (cma)
 			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));

@@ -239,7 +239,7 @@ static void *page_align_ptr(void *ptr)
 static void *diag204_alloc_vbuf(int pages)
 {
 	/* The buffer has to be page aligned! */
-	diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1));
+	diag204_buf_vmalloc = vmalloc(array_size(PAGE_SIZE, (pages + 1)));
 	if (!diag204_buf_vmalloc)
 		return ERR_PTR(-ENOMEM);
 	diag204_buf = page_align_ptr(diag204_buf_vmalloc);

@@ -123,8 +123,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 
 	/* Allocate one syminfo structure per symbol. */
 	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
-	me->arch.syminfo = vmalloc(me->arch.nsyms *
-				   sizeof(struct mod_arch_syminfo));
+	me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo),
+					      me->arch.nsyms));
 	if (!me->arch.syminfo)
 		return -ENOMEM;
 	symbols = (void *) hdr + symtab->sh_offset;

@@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
 	if (pages <= 0)
 		return;
 
-	diag204_buf = vmalloc(PAGE_SIZE * pages);
+	diag204_buf = vmalloc(array_size(pages, PAGE_SIZE));
 	if (!diag204_buf)
 		return;
 

@@ -847,7 +847,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
 	pages = pages_array;
 	if (nr_pages > ARRAY_SIZE(pages_array))
-		pages = vmalloc(nr_pages * sizeof(unsigned long));
+		pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
 	if (!pages)
 		return -ENOMEM;
 	need_ipte_lock = psw_bits(*psw).dat && !asce.r;

@@ -1725,7 +1725,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
 	if (args->count == 0)
 		return 0;
 
-	bits = vmalloc(sizeof(*bits) * args->count);
+	bits = vmalloc(array_size(sizeof(*bits), args->count));
 	if (!bits)
 		return -ENOMEM;
 

@@ -203,8 +203,9 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 		goto out;
 	r = -ENOMEM;
 	if (cpuid->nent) {
-		cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
-					cpuid->nent);
+		cpuid_entries =
+			vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
+					   cpuid->nent));
 		if (!cpuid_entries)
 			goto out;
 		r = -EFAULT;
@@ -403,7 +403,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
 				 fw_priv->page_array_size * 2);
 	struct page **new_pages;
 
-	new_pages = vmalloc(new_array_size * sizeof(void *));
+	new_pages = vmalloc(array_size(new_array_size, sizeof(void *)));
 	if (!new_pages) {
 		fw_load_abort(fw_sysfs);
 		return -ENOMEM;

@@ -910,7 +910,8 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 /* Called with ichan->chan_mutex held */
 static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
 {
-	struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc));
+	struct idmac_tx_desc *desc =
+		vmalloc(array_size(n, sizeof(struct idmac_tx_desc)));
 	struct idmac *idmac = to_idmac(ichan->dma_chan.device);
 
 	if (!desc)

@@ -80,7 +80,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
 	 * page-table instead (that's probably faster anyhow...).
 	 */
 	/* note: use vmalloc() because num_pages could be large... */
-	page_map = vmalloc(num_pages * sizeof(struct page *));
+	page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
 	if (!page_map)
 		return NULL;
 

@@ -141,7 +141,7 @@ nv84_fence_suspend(struct nouveau_drm *drm)
 	struct nv84_fence_priv *priv = drm->fence;
 	int i;
 
-	priv->suspend = vmalloc(drm->chan.nr * sizeof(u32));
+	priv->suspend = vmalloc(array_size(sizeof(u32), drm->chan.nr));
 	if (priv->suspend) {
 		for (i = 0; i < drm->chan.nr; i++)
 			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);

@@ -241,7 +241,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 	DRM_DEBUG_DRIVER("%dx%d %d\n", mode_cmd.width,
 			 mode_cmd.height, mode_cmd.pitches[0]);
 
-	shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
+	shadow = vmalloc(array_size(mode_cmd.pitches[0], mode_cmd.height));
 	/* TODO: what's the usual response to memory allocation errors? */
 	BUG_ON(!shadow);
 	DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",

@@ -352,8 +352,8 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
-	rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
-					 rdev->gart.num_gpu_pages);
+	rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
+						    rdev->gart.num_gpu_pages));
 	if (rdev->gart.pages_entry == NULL) {
 		radeon_gart_fini(rdev);
 		return -ENOMEM;

@@ -579,7 +579,7 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
 	DRM_MM_BUG_ON(!size);
 
 	ret = -ENOMEM;
-	nodes = vmalloc(count * sizeof(*nodes));
+	nodes = vmalloc(array_size(count, sizeof(*nodes)));
 	if (!nodes)
 		goto err;
 

@@ -465,7 +465,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	gart->iovmm_base = (dma_addr_t)res_remap->start;
 	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);
 
-	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
+	gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
 	if (!gart->savedata) {
 		dev_err(dev, "failed to allocate context save area\n");
 		return -ENOMEM;
@@ -340,7 +340,7 @@ static void *bsd_alloc(struct isdn_ppp_comp_data *data)
 	 * Allocate space for the dictionary. This may be more than one page in
 	 * length.
 	 */
-	db->dict = vmalloc(hsize * sizeof(struct bsd_dict));
+	db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
 	if (!db->dict) {
 		bsd_free(db);
 		return NULL;

@@ -353,7 +353,8 @@ static void *bsd_alloc(struct isdn_ppp_comp_data *data)
 	if (!decomp)
 		db->lens = NULL;
 	else {
-		db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0]));
+		db->lens = vmalloc(array_size(sizeof(db->lens[0]),
+					      maxmaxcode + 1));
 		if (!db->lens) {
 			bsd_free(db);
 			return (NULL);

@@ -88,7 +88,7 @@ static void pblk_gc_line_ws(struct work_struct *work)
 
 	up(&gc->gc_sem);
 
-	gc_rq->data = vmalloc(gc_rq->nr_secs * geo->csecs);
+	gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
 	if (!gc_rq->data) {
 		pr_err("pblk: could not GC line:%d (%d/%d)\n",
 			line->id, *line->vsc, gc_rq->nr_secs);

@@ -881,7 +881,8 @@ SHOW(__bch_cache)
 	uint16_t q[31], *p, *cached;
 	ssize_t ret;
 
-	cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
+	cached = p = vmalloc(array_size(sizeof(uint16_t),
+					ca->sb.nbuckets));
 	if (!p)
 		return -ENOMEM;
 

@@ -588,7 +588,7 @@ static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr
 	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
 	ht->hash_bits = __ffs(nr_buckets);
 
-	ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
+	ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
 	if (!ht->buckets)
 		return -ENOMEM;
 

@@ -202,7 +202,7 @@ struct dm_region_hash *dm_region_hash_create(
 	rh->shift = RH_HASH_SHIFT;
 	rh->prime = RH_HASH_MULT;
 
-	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+	rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
 	if (!rh->buckets) {
 		DMERR("unable to allocate region hash bucket memory");
 		kfree(rh);

@@ -114,7 +114,8 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
 		return -EINVAL;
 	}
 
-	sctx->region_table = vmalloc(nr_slots * sizeof(region_table_slot_t));
+	sctx->region_table = vmalloc(array_size(nr_slots,
+						sizeof(region_table_slot_t)));
 	if (!sctx->region_table) {
 		ti->error = "Cannot allocate region table";
 		return -ENOMEM;

@@ -2939,7 +2939,9 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 		goto bad_mapping_pool;
 	}
 
-	pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
+	pool->cell_sort_array =
+		vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
+				   sizeof(*pool->cell_sort_array)));
 	if (!pool->cell_sort_array) {
 		*error = "Error allocating cell sort array";
 		err_p = ERR_PTR(-ENOMEM);
@@ -1417,7 +1417,8 @@ int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
 	if (dmxdev->demux->open(dmxdev->demux) < 0)
 		return -EUSERS;
 
-	dmxdev->filter = vmalloc(dmxdev->filternum * sizeof(struct dmxdev_filter));
+	dmxdev->filter = vmalloc(array_size(sizeof(struct dmxdev_filter),
+					    dmxdev->filternum));
 	if (!dmxdev->filter)
 		return -ENOMEM;
 

@@ -1247,12 +1247,14 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
 
 	dvbdemux->cnt_storage = NULL;
 	dvbdemux->users = 0;
-	dvbdemux->filter = vmalloc(dvbdemux->filternum * sizeof(struct dvb_demux_filter));
+	dvbdemux->filter = vmalloc(array_size(sizeof(struct dvb_demux_filter),
+					      dvbdemux->filternum));
 
 	if (!dvbdemux->filter)
 		return -ENOMEM;
 
-	dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
+	dvbdemux->feed = vmalloc(array_size(sizeof(struct dvb_demux_feed),
+					    dvbdemux->feednum));
 	if (!dvbdemux->feed) {
 		vfree(dvbdemux->filter);
 		dvbdemux->filter = NULL;

@@ -1625,7 +1625,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 	ret = -ENOMEM;
 	meye.mchip_dev = pcidev;
 
-	meye.grab_temp = vmalloc(MCHIP_NB_PAGES_MJPEG * PAGE_SIZE);
+	meye.grab_temp = vmalloc(array_size(PAGE_SIZE, MCHIP_NB_PAGES_MJPEG));
 	if (!meye.grab_temp)
 		goto outvmalloc;
 

@@ -615,7 +615,7 @@ static int pt1_init_tables(struct pt1 *pt1)
 	if (!pt1_nr_tables)
 		return 0;
 
-	tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
+	tables = vmalloc(array_size(pt1_nr_tables, sizeof(struct pt1_table)));
 	if (tables == NULL)
 		return -ENOMEM;
 

@@ -24,7 +24,7 @@ void av7110_ipack_reset(struct ipack *p)
 int av7110_ipack_init(struct ipack *p, int size,
 		      void (*func)(u8 *buf, int size, void *priv))
 {
-	if (!(p->buf = vmalloc(size*sizeof(u8)))) {
+	if (!(p->buf = vmalloc(size))) {
 		printk(KERN_WARNING "Couldn't allocate memory for ipack\n");
 		return -ENOMEM;
 	}

@@ -481,7 +481,8 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
 		return -ENXIO;
 
 	icd->user_formats =
-		vmalloc(fmts * sizeof(struct soc_camera_format_xlate));
+		vmalloc(array_size(fmts,
+				   sizeof(struct soc_camera_format_xlate)));
 	if (!icd->user_formats)
 		return -ENOMEM;
 

@@ -100,7 +100,7 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
 
 	if (NULL == pages[0])
 		return NULL;
-	sglist = vmalloc(nr_pages * sizeof(*sglist));
+	sglist = vmalloc(array_size(nr_pages, sizeof(*sglist)));
 	if (NULL == sglist)
 		return NULL;
 	sg_init_table(sglist, nr_pages);

@@ -263,7 +263,7 @@ static int build_maps(partition_t *part)
 
 	/* Set up virtual page map */
 	blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
-	part->VirtualBlockMap = vmalloc(blocks * sizeof(uint32_t));
+	part->VirtualBlockMap = vmalloc(array_size(blocks, sizeof(uint32_t)));
 	if (!part->VirtualBlockMap)
 		goto out_XferInfo;
 

@@ -330,8 +330,10 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 	}
 
 	/* oops_page_used is a bit field */
-	cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
-			BITS_PER_LONG) * sizeof(unsigned long));
+	cxt->oops_page_used =
+		vmalloc(array_size(sizeof(unsigned long),
+				   DIV_ROUND_UP(mtdoops_pages,
+						BITS_PER_LONG)));
 	if (!cxt->oops_page_used) {
 		printk(KERN_ERR "mtdoops: could not allocate page array\n");
 		return;
@@ -1317,11 +1317,11 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
 	for (i = 0; i < MTDSWAP_TREE_CNT; i++)
 		d->trees[i].root = RB_ROOT;
 
-	d->page_data = vmalloc(sizeof(int)*pages);
+	d->page_data = vmalloc(array_size(pages, sizeof(int)));
 	if (!d->page_data)
 		goto page_data_fail;
 
-	d->revmap = vmalloc(sizeof(int)*blocks);
+	d->revmap = vmalloc(array_size(blocks, sizeof(int)));
 	if (!d->revmap)
 		goto revmap_fail;
 

@@ -582,7 +582,7 @@ static int __init alloc_device(struct nandsim *ns)
 		return 0;
 	}
 
-	ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
+	ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
 	if (!ns->pages) {
 		NS_ERR("alloc_device: unable to allocate page array\n");
 		return -ENOMEM;

@@ -189,7 +189,8 @@ static int scan_header(struct partition *part)
 	if (!part->blocks)
 		goto err;
 
-	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
+	part->sector_map = vmalloc(array_size(sizeof(u_long),
+					      part->sector_count));
 	if (!part->sector_map) {
 		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
 		       "sector map", part->mbd.mtd->name);

@@ -98,8 +98,9 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
 					numa_node);
 	if (!iq->request_list)
-		iq->request_list = vmalloc(sizeof(*iq->request_list) *
-					   num_descs);
+		iq->request_list =
+			vmalloc(array_size(num_descs,
+					   sizeof(*iq->request_list)));
 	if (!iq->request_list) {
 		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
 		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",

@@ -558,7 +558,7 @@ static int fm10k_set_ringparam(struct net_device *netdev,
 
 	/* allocate temporary buffer to store rings in */
 	i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
-	temp_ring = vmalloc(i * sizeof(struct fm10k_ring));
+	temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));
 
 	if (!temp_ring) {
 		err = -ENOMEM;

@@ -902,11 +902,11 @@ static int igb_set_ringparam(struct net_device *netdev,
 	}
 
 	if (adapter->num_tx_queues > adapter->num_rx_queues)
-		temp_ring = vmalloc(adapter->num_tx_queues *
-				    sizeof(struct igb_ring));
+		temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
+					       adapter->num_tx_queues));
 	else
-		temp_ring = vmalloc(adapter->num_rx_queues *
-				    sizeof(struct igb_ring));
+		temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
+					       adapter->num_rx_queues));
 
 	if (!temp_ring) {
 		err = -ENOMEM;

@@ -1063,7 +1063,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 	/* allocate temporary buffer to store rings in */
 	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
 		  adapter->num_rx_queues);
-	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
+	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
 
 	if (!temp_ring) {
 		err = -ENOMEM;

@@ -282,8 +282,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	}
 
 	if (new_tx_count != adapter->tx_ring_count) {
-		tx_ring = vmalloc((adapter->num_tx_queues +
-				   adapter->num_xdp_queues) * sizeof(*tx_ring));
+		tx_ring = vmalloc(array_size(sizeof(*tx_ring),
+					     adapter->num_tx_queues +
+					     adapter->num_xdp_queues));
 		if (!tx_ring) {
 			err = -ENOMEM;
 			goto clear_reset;

@@ -327,7 +328,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	}
 
 	if (new_rx_count != adapter->rx_ring_count) {
-		rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
+		rx_ring = vmalloc(array_size(sizeof(*rx_ring),
+					     adapter->num_rx_queues));
 		if (!rx_ring) {
 			err = -ENOMEM;
 			goto clear_reset;
@@ -417,7 +417,8 @@ int nfp_flower_metadata_init(struct nfp_app *app)
 
 	/* Init ring buffer and unallocated stats_ids. */
 	priv->stats_ids.free_list.buf =
-		vmalloc(NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
+				   NFP_FL_STATS_ENTRY_RS));
 	if (!priv->stats_ids.free_list.buf)
 		goto err_free_last_used;
 

@@ -406,7 +406,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
 	 * Allocate space for the dictionary. This may be more than one page in
 	 * length.
 	 */
-	db->dict = vmalloc(hsize * sizeof(struct bsd_dict));
+	db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
 	if (!db->dict)
 	{
 		bsd_free (db);

@@ -425,7 +425,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
 	 */
 	else
 	{
-		db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0]));
+		db->lens = vmalloc(array_size(sizeof(db->lens[0]), (maxmaxcode + 1)));
 		if (!db->lens)
 		{
 			bsd_free (db);

@@ -931,7 +931,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
 
 	/* Create buffer and read in eeprom */
 
-	buf = vmalloc(eesize * 2);
+	buf = vmalloc(array_size(eesize, 2));
 	if (!buf) {
 		ret = -ENOMEM;
 		goto err;

@@ -4242,8 +4242,8 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
 	 * additional active scan request for hidden SSIDs on passive channels.
 	 */
 	adapter->num_in_chan_stats = 2 * (n_channels_bg + n_channels_a);
-	adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
-				      adapter->num_in_chan_stats);
+	adapter->chan_stats = vmalloc(array_size(sizeof(*adapter->chan_stats),
+						 adapter->num_in_chan_stats));
 
 	if (!adapter->chan_stats)
 		return -ENOMEM;

@@ -91,7 +91,7 @@ int alloc_event_buffer(void)
 		return -EINVAL;
 
 	buffer_pos = 0;
-	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
+	event_buffer = vmalloc(array_size(buffer_size, sizeof(unsigned long)));
 	if (!event_buffer)
 		return -ENOMEM;
 

@@ -975,7 +975,7 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
 			priv->md->properties.transfer_mode) == 0)
 		return -ENODEV;
 
-	transfer = vmalloc(transaction.count * sizeof(*transfer));
+	transfer = vmalloc(array_size(sizeof(*transfer), transaction.count));
 	if (!transfer)
 		return -ENOMEM;
 

@@ -233,8 +233,8 @@ static int fnic_trace_debugfs_open(struct inode *inode,
 		return -ENOMEM;
 
 	if (*rdata_ptr == fc_trc_flag->fnic_trace) {
-		fnic_dbg_prt->buffer = vmalloc(3 *
-					       (trace_max_pages * PAGE_SIZE));
+		fnic_dbg_prt->buffer = vmalloc(array3_size(3, trace_max_pages,
+							   PAGE_SIZE));
 		if (!fnic_dbg_prt->buffer) {
 			kfree(fnic_dbg_prt);
 			return -ENOMEM;
@@ -244,7 +244,8 @@ static int fnic_trace_debugfs_open(struct inode *inode,
 		fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
 	} else {
 		fnic_dbg_prt->buffer =
-			vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
+			vmalloc(array3_size(3, fnic_fc_trace_max_pages,
+					    PAGE_SIZE));
 		if (!fnic_dbg_prt->buffer) {
 			kfree(fnic_dbg_prt);
 			return -ENOMEM;

@@ -477,8 +477,9 @@ int fnic_trace_buf_init(void)
 	}
 	memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
 
-	fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
-						 sizeof(unsigned long));
+	fnic_trace_entries.page_offset =
+		vmalloc(array_size(fnic_max_trace_entries,
+				   sizeof(unsigned long)));
 	if (!fnic_trace_entries.page_offset) {
 		printk(KERN_ERR PFX "Failed to allocate memory for"
 		       " page_offset\n");

@@ -555,8 +556,9 @@ int fnic_fc_trace_init(void)
 
 	fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
 				FC_TRC_SIZE_BYTES;
-	fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc(
-					fnic_fc_trace_max_pages * PAGE_SIZE);
+	fnic_fc_ctlr_trace_buf_p =
+		(unsigned long)vmalloc(array_size(PAGE_SIZE,
+						  fnic_fc_trace_max_pages));
 	if (!fnic_fc_ctlr_trace_buf_p) {
 		pr_err("fnic: Failed to allocate memory for "
 		       "FC Control Trace Buf\n");

@@ -568,8 +570,9 @@ int fnic_fc_trace_init(void)
 		fnic_fc_trace_max_pages * PAGE_SIZE);
 
 	/* Allocate memory for page offset */
-	fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries *
-					       sizeof(unsigned long));
+	fc_trace_entries.page_offset =
+		vmalloc(array_size(fc_trace_max_entries,
+				   sizeof(unsigned long)));
 	if (!fc_trace_entries.page_offset) {
 		pr_err("fnic:Failed to allocate memory for page_offset\n");
 		if (fnic_fc_ctlr_trace_buf_p) {

@@ -4331,9 +4331,11 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 	}
 
 	if (ioa_cfg->sis64)
-		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
+		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
+					      sizeof(__be32 *)));
 	else
-		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
+		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
+					      sizeof(__be32 *)));
 
 	if (!ioa_data) {
 		ipr_err("Dump memory allocation failed\n");

@@ -1488,7 +1488,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct osst
 	int dbg = debugging;
 #endif
 
-	if ((buffer = vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
+	if ((buffer = vmalloc(array_size((nframes + 1), OS_DATA_SIZE))) == NULL)
 		return (-EIO);
 
 	printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n",

@@ -5439,7 +5439,8 @@ static int __init scsi_debug_init(void)
 	}
 
 	map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
-	map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
+	map_storep = vmalloc(array_size(sizeof(long),
+					BITS_TO_LONGS(map_size)));
 
 	pr_info("%lu provisioning blocks\n", map_size);
 

@@ -25,7 +25,8 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
 	pgprot_t pgprot;
 	struct sg_table *table = buffer->sg_table;
 	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-	struct page **pages = vmalloc(sizeof(struct page *) * npages);
+	struct page **pages = vmalloc(array_size(npages,
+						 sizeof(struct page *)));
 	struct page **tmp = pages;
 
 	if (!pages)

@@ -1175,8 +1175,9 @@ static int gb_camera_debugfs_init(struct gb_camera *gcam)
 
 	gcam->debugfs.root = debugfs_create_dir(dirname, gb_debugfs_get());
 
-	gcam->debugfs.buffers = vmalloc(sizeof(*gcam->debugfs.buffers) *
-					GB_CAMERA_DEBUGFS_BUFFER_MAX);
+	gcam->debugfs.buffers =
+		vmalloc(array_size(GB_CAMERA_DEBUGFS_BUFFER_MAX,
+				   sizeof(*gcam->debugfs.buffers)));
 	if (!gcam->debugfs.buffers)
 		return -ENOMEM;
 
@@ -1220,7 +1220,8 @@ static int setup_window(struct zoran_fh *fh,
 		}
 	} else if (clipcount) {
 		/* write our own bitmap from the clips */
-		vcp = vmalloc(sizeof(struct v4l2_clip) * (clipcount + 4));
+		vcp = vmalloc(array_size(sizeof(struct v4l2_clip),
+					 clipcount + 4));
 		if (vcp == NULL) {
 			dprintk(1,
 				KERN_ERR

@@ -2618,7 +2618,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
 	segment = &ms_card->segment[seg_no];
 
 	if (!segment->l2p_table) {
-		segment->l2p_table = vmalloc(table_size * 2);
+		segment->l2p_table = vmalloc(array_size(table_size, 2));
 		if (!segment->l2p_table) {
 			rtsx_trace(chip);
 			goto BUILD_FAIL;

@@ -1721,7 +1721,7 @@ int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
 
 	dev_dbg(rtsx_dev(chip), "dw_len = %d\n", dw_len);
 
-	data = vmalloc(dw_len * 4);
+	data = vmalloc(array_size(dw_len, 4));
 	if (!data) {
 		rtsx_trace(chip);
 		return STATUS_NOMEM;

@@ -1243,7 +1243,7 @@ sisusbcon_font_set(struct vc_data *c, struct console_font *font,
 	}
 
 	if (!sisusb->font_backup)
-		sisusb->font_backup = vmalloc(charcount * 32);
+		sisusb->font_backup = vmalloc(array_size(charcount, 32));
 
 	if (sisusb->font_backup) {
 		memcpy(sisusb->font_backup, font->data, charcount * 32);

@@ -412,7 +412,7 @@ static int xenfb_probe(struct xenbus_device *dev,
 
 	info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	info->gfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
+	info->gfns = vmalloc(array_size(sizeof(unsigned long), info->nr_pages));
 	if (!info->gfns)
 		goto error_nomem;
 

@@ -2294,7 +2294,7 @@ static int elf_core_dump(struct coredump_params *cprm)
 
 	if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
 		goto end_coredump;
-	vma_filesz = vmalloc((segs - 1) * sizeof(*vma_filesz));
+	vma_filesz = vmalloc(array_size(sizeof(*vma_filesz), (segs - 1)));
 	if (!vma_filesz)
 		goto end_coredump;
 

@@ -789,7 +789,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 			  GFP_KERNEL);
 
 	if (!bv) {
-		bv = vmalloc(max_pages * sizeof(struct bio_vec));
+		bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
 		if (!bv)
 			return -ENOMEM;
 	}

@@ -799,7 +799,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 			     GFP_KERNEL);
 
 	if (!pages) {
-		pages = vmalloc(max_pages * sizeof(struct page *));
+		pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
 		if (!pages) {
 			kvfree(bv);
 			return -ENOMEM;
@@ -517,7 +517,7 @@ static int new_lockspace(const char *name, const char *cluster,
 	size = dlm_config.ci_rsbtbl_size;
 	ls->ls_rsbtbl_size = size;
 
-	ls->ls_rsbtbl = vmalloc(sizeof(struct dlm_rsbtable) * size);
+	ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable)));
 	if (!ls->ls_rsbtbl)
 		goto out_lsfree;
 	for (i = 0; i < size; i++) {

@@ -1456,7 +1456,7 @@ int reiserfs_init_bitmap_cache(struct super_block *sb)
 	struct reiserfs_bitmap_info *bitmap;
 	unsigned int bmap_nr = reiserfs_bmap_count(sb);
 
-	bitmap = vmalloc(sizeof(*bitmap) * bmap_nr);
+	bitmap = vmalloc(array_size(bmap_nr, sizeof(*bitmap)));
 	if (bitmap == NULL)
 		return -ENOMEM;
 

@@ -632,7 +632,8 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
 	pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_KERNEL);
 	nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_KERNEL);
 	buf = vmalloc(c->leb_size);
-	ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
+	ltab = vmalloc(array_size(sizeof(struct ubifs_lpt_lprops),
+				  c->lpt_lebs));
 	if (!pnode || !nnode || !buf || !ltab || !lsave) {
 		err = -ENOMEM;
 		goto out;

@@ -1626,7 +1627,8 @@ static int lpt_init_rd(struct ubifs_info *c)
 {
 	int err, i;
 
-	c->ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
+	c->ltab = vmalloc(array_size(sizeof(struct ubifs_lpt_lprops),
+				     c->lpt_lebs));
 	if (!c->ltab)
 		return -ENOMEM;
 

@@ -1690,7 +1692,8 @@ static int lpt_init_wr(struct ubifs_info *c)
 {
 	int err, i;
 
-	c->ltab_cmt = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
+	c->ltab_cmt = vmalloc(array_size(sizeof(struct ubifs_lpt_lprops),
+					 c->lpt_lebs));
 	if (!c->ltab_cmt)
 		return -ENOMEM;
 

@@ -195,7 +195,7 @@ struct cgroup_pidlist {
 static void *pidlist_allocate(int count)
 {
 	if (PIDLIST_TOO_LARGE(count))
-		return vmalloc(count * sizeof(pid_t));
+		return vmalloc(array_size(count, sizeof(pid_t)));
 	else
 		return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
 }

@@ -698,7 +698,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 		goto out_clean;
 	}
 
-	data = vmalloc(sizeof(*data) * nr_threads);
+	data = vmalloc(array_size(nr_threads, sizeof(*data)));
 	if (!data) {
 		pr_err("Failed to allocate LZO data\n");
 		ret = -ENOMEM;

@@ -1183,14 +1183,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	nr_threads = num_online_cpus() - 1;
 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
+	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
 	if (!page) {
 		pr_err("Failed to allocate LZO page\n");
 		ret = -ENOMEM;
 		goto out_clean;
 	}
 
-	data = vmalloc(sizeof(*data) * nr_threads);
+	data = vmalloc(array_size(nr_threads, sizeof(*data)));
 	if (!data) {
 		pr_err("Failed to allocate LZO data\n");
 		ret = -ENOMEM;
@@ -831,8 +831,9 @@ rcu_torture_cbflood(void *arg)
 	    cbflood_intra_holdoff > 0 &&
 	    cur_ops->call &&
 	    cur_ops->cb_barrier) {
-		rhp = vmalloc(sizeof(*rhp) *
-			      cbflood_n_burst * cbflood_n_per_burst);
+		rhp = vmalloc(array3_size(cbflood_n_burst,
+					  cbflood_n_per_burst,
+					  sizeof(*rhp)));
 		err = !rhp;
 	}
 	if (err) {

@@ -1075,7 +1075,7 @@ int tracing_map_sort_entries(struct tracing_map *map,
 	struct tracing_map_sort_entry *sort_entry, **entries;
 	int i, n_entries, ret;
 
-	entries = vmalloc(map->max_elts * sizeof(sort_entry));
+	entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts));
 	if (!entries)
 		return -ENOMEM;
 

@@ -144,7 +144,7 @@ static int percpu_stats_show(struct seq_file *m, void *v)
 	spin_unlock_irq(&pcpu_lock);
 
 	/* there can be at most this many free and allocated fragments */
-	buffer = vmalloc((2 * max_nr_alloc + 1) * sizeof(int));
+	buffer = vmalloc(array_size(sizeof(int), (2 * max_nr_alloc + 1)));
 	if (!buffer)
 		return -ENOMEM;
 

@@ -903,12 +903,13 @@ static int translate_table(struct net *net, const char *name,
 	 * if an error occurs
 	 */
	newinfo->chainstack =
-		vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
+		vmalloc(array_size(nr_cpu_ids,
+				   sizeof(*(newinfo->chainstack))));
 	if (!newinfo->chainstack)
 		return -ENOMEM;
 	for_each_possible_cpu(i) {
 		newinfo->chainstack[i] =
-			vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
+			vmalloc(array_size(udc_cnt, sizeof(*(newinfo->chainstack[0]))));
 		if (!newinfo->chainstack[i]) {
 			while (i)
 				vfree(newinfo->chainstack[--i]);

@@ -918,7 +919,7 @@ static int translate_table(struct net *net, const char *name,
 		}
 	}
 
-	cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
+	cl_s = vmalloc(array_size(udc_cnt, sizeof(*cl_s)));
 	if (!cl_s)
 		return -ENOMEM;
 	i = 0; /* the i'th udc */

@@ -1293,7 +1294,7 @@ static int do_update_counters(struct net *net, const char *name,
 	if (num_counters == 0)
 		return -EINVAL;
 
-	tmp = vmalloc(num_counters * sizeof(*tmp));
+	tmp = vmalloc(array_size(num_counters, sizeof(*tmp)));
 	if (!tmp)
 		return -ENOMEM;
 

@@ -1434,7 +1435,7 @@ static int copy_counters_to_user(struct ebt_table *t,
 		return -EINVAL;
 	}
 
-	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
+	counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp)));
 	if (!counterstmp)
 		return -ENOMEM;
 

@@ -1380,7 +1380,8 @@ int __init ip_vs_conn_init(void)
 	/*
 	 * Allocate the connection hash table and initialize its list heads
 	 */
-	ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
+	ip_vs_conn_tab = vmalloc(array_size(ip_vs_conn_tab_size,
+					    sizeof(*ip_vs_conn_tab)));
 	if (!ip_vs_conn_tab)
 		return -ENOMEM;
 
@@ -389,7 +389,8 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
 	if (snd_BUG_ON(!pool))
 		return -EINVAL;
 
-	cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
+	cellptr = vmalloc(array_size(sizeof(struct snd_seq_event_cell),
+				     pool->size));
 	if (!cellptr)
 		return -ENOMEM;
 

@@ -240,8 +240,9 @@ struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
 		return NULL;
 
 	/* better to use vmalloc for this big table */
-	ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) *
-					    DSP_MAX_SYMBOLS);
+	ins->symbol_table.symbols =
+		vmalloc(array_size(DSP_MAX_SYMBOLS,
+				   sizeof(struct dsp_symbol_entry)));
 	ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
 	ins->modules = kmalloc_array(DSP_MAX_MODULES,
 				     sizeof(struct dsp_module_desc),

@@ -1941,9 +1941,10 @@ int snd_emu10k1_create(struct snd_card *card,
 		(unsigned long)emu->ptb_pages.addr,
 		(unsigned long)(emu->ptb_pages.addr + emu->ptb_pages.bytes));
 
-	emu->page_ptr_table = vmalloc(emu->max_cache_pages * sizeof(void *));
-	emu->page_addr_table = vmalloc(emu->max_cache_pages *
-				       sizeof(unsigned long));
+	emu->page_ptr_table = vmalloc(array_size(sizeof(void *),
+						 emu->max_cache_pages));
+	emu->page_addr_table = vmalloc(array_size(sizeof(unsigned long),
+						  emu->max_cache_pages));
 	if (emu->page_ptr_table == NULL || emu->page_addr_table == NULL) {
 		err = -ENOMEM;
 		goto error;

@@ -2099,7 +2100,7 @@ static int alloc_pm_buffer(struct snd_emu10k1 *emu)
 	size = ARRAY_SIZE(saved_regs);
 	if (emu->audigy)
 		size += ARRAY_SIZE(saved_regs_audigy);
-	emu->saved_ptr = vmalloc(4 * NUM_G * size);
+	emu->saved_ptr = vmalloc(array3_size(4, NUM_G, size));
 	if (!emu->saved_ptr)
 		return -ENOMEM;
 	if (snd_emu10k1_efx_alloc_pm_buffer(emu) < 0)

@@ -2692,7 +2692,7 @@ int snd_emu10k1_efx_alloc_pm_buffer(struct snd_emu10k1 *emu)
 	if (! emu->tram_val_saved || ! emu->tram_addr_saved)
 		return -ENOMEM;
 	len = emu->audigy ? 2 * 1024 : 2 * 512;
-	emu->saved_icode = vmalloc(len * 4);
+	emu->saved_icode = vmalloc(array_size(len, 4));
 	if (! emu->saved_icode)
 		return -ENOMEM;
 	return 0;

@@ -874,7 +874,7 @@ int snd_p16v_mixer(struct snd_emu10k1 *emu)
 
 int snd_p16v_alloc_pm_buffer(struct snd_emu10k1 *emu)
 {
-	emu->p16v_saved = vmalloc(NUM_CHS * 4 * 0x80);
+	emu->p16v_saved = vmalloc(array_size(NUM_CHS * 4, 0x80));
 	if (! emu->p16v_saved)
 		return -ENOMEM;
 	return 0;

@@ -2657,7 +2657,10 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
 	chip->irq = pci->irq;
 
 #ifdef CONFIG_PM_SLEEP
-	chip->suspend_mem = vmalloc(sizeof(u16) * (REV_B_CODE_MEMORY_LENGTH + REV_B_DATA_MEMORY_LENGTH));
+	chip->suspend_mem =
+		vmalloc(array_size(sizeof(u16),
+				   REV_B_CODE_MEMORY_LENGTH +
+				   REV_B_DATA_MEMORY_LENGTH));
 	if (chip->suspend_mem == NULL)
 		dev_warn(card->dev, "can't allocate apm buffer\n");
 #endif

@@ -3362,7 +3362,9 @@ static int snd_trident_tlb_alloc(struct snd_trident *trident)
 	trident->tlb.entries = (unsigned int*)ALIGN((unsigned long)trident->tlb.buffer.area, SNDRV_TRIDENT_MAX_PAGES * 4);
 	trident->tlb.entries_dmaaddr = ALIGN(trident->tlb.buffer.addr, SNDRV_TRIDENT_MAX_PAGES * 4);
 	/* allocate shadow TLB page table (virtual addresses) */
-	trident->tlb.shadow_entries = vmalloc(SNDRV_TRIDENT_MAX_PAGES*sizeof(unsigned long));
+	trident->tlb.shadow_entries =
+		vmalloc(array_size(SNDRV_TRIDENT_MAX_PAGES,
+				   sizeof(unsigned long)));
 	if (!trident->tlb.shadow_entries)
 		return -ENOMEM;
 

@@ -3059,7 +3059,8 @@ static long kvm_vm_ioctl(struct file *filp,
 			goto out;
 		if (routing.nr) {
 			r = -ENOMEM;
-			entries = vmalloc(routing.nr * sizeof(*entries));
+			entries = vmalloc(array_size(sizeof(*entries),
+						     routing.nr));
 			if (!entries)
 				goto out;
 			r = -EFAULT;