Merge tag 'overflow-v4.18-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull more overflow updates from Kees Cook:
 "The rest of the overflow changes for v4.18-rc1.

  This includes the explicit overflow fixes from Silvio, further
  struct_size() conversions from Matthew, and a bug fix from Dan.

  But the bulk of it is the treewide conversions to use either the
  2-factor argument allocators (e.g. kmalloc(a * b, ...) into
  kmalloc_array(a, b, ...)) or the array_size() macros (e.g. vmalloc(a *
  b) into vmalloc(array_size(a, b))).

  Coccinelle was fighting me on several fronts, so I've done a bunch of
  manual whitespace updates in the patches as well.

  Summary:

   - Error path bug fix for overflow tests (Dan)

   - Additional struct_size() conversions (Matthew, Kees)

   - Explicitly reported overflow fixes (Silvio, Kees)

   - Add missing kvcalloc() function (Kees)

   - Treewide conversions of allocators to use either 2-factor argument
     variant when available, or array_size() and array3_size() as needed
     (Kees)"
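
For anyone unfamiliar with the pattern being converted, the sketch below
shows the general shape of these treewide changes. It is an illustrative,
hypothetical snippet rather than code from the series: "struct item" and the
function names are invented, while kmalloc(), kmalloc_array(), vmalloc() and
array_size() are the kernel helpers named in the summary above.

    /* Illustrative sketch only -- hypothetical code, not from this series. */
    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/overflow.h>

    struct item {                       /* hypothetical element type */
            int a, b;
    };

    static struct item *alloc_items_old(size_t nelems)
    {
            /* Before: the multiplication can wrap, yielding a short buffer. */
            return kmalloc(nelems * sizeof(struct item), GFP_KERNEL);
    }

    static struct item *alloc_items_new(size_t nelems)
    {
            /*
             * After: kmalloc_array() checks the multiplication and
             * returns NULL instead of an undersized allocation.
             */
            return kmalloc_array(nelems, sizeof(struct item), GFP_KERNEL);
    }

    static struct item *valloc_items_new(size_t nelems)
    {
            /*
             * vmalloc() has no 2-factor variant, so array_size() does the
             * checking; on overflow it saturates to SIZE_MAX and the
             * allocation fails cleanly.
             */
            return vmalloc(array_size(nelems, sizeof(struct item)));
    }

The zeroing variants follow the same shape: kzalloc(n * size) becomes
kcalloc(n, size) and vzalloc(n * size) becomes vzalloc(array_size(n, size)),
as the hunks further down show.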

* tag 'overflow-v4.18-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (26 commits)
  treewide: Use array_size in f2fs_kvzalloc()
  treewide: Use array_size() in f2fs_kzalloc()
  treewide: Use array_size() in f2fs_kmalloc()
  treewide: Use array_size() in sock_kmalloc()
  treewide: Use array_size() in kvzalloc_node()
  treewide: Use array_size() in vzalloc_node()
  treewide: Use array_size() in vzalloc()
  treewide: Use array_size() in vmalloc()
  treewide: devm_kzalloc() -> devm_kcalloc()
  treewide: devm_kmalloc() -> devm_kmalloc_array()
  treewide: kvzalloc() -> kvcalloc()
  treewide: kvmalloc() -> kvmalloc_array()
  treewide: kzalloc_node() -> kcalloc_node()
  treewide: kzalloc() -> kcalloc()
  treewide: kmalloc() -> kmalloc_array()
  mm: Introduce kvcalloc()
  video: uvesafb: Fix integer overflow in allocation
  UBIFS: Fix potential integer overflow in allocation
  leds: Use struct_size() in allocation
  Convert intel uncore to struct_size
  ...
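
Two newer helpers that appear in the shortlog above are worth a short
illustration: struct_size(), used by the leds and intel uncore conversions,
and kvcalloc(), added by "mm: Introduce kvcalloc()". The sketch below is
hypothetical -- the struct and function names are invented for the example --
but the helpers themselves are the ones introduced or used by this series.

    /* Illustrative sketch only -- hypothetical code, not from this series. */
    #include <linux/slab.h>
    #include <linux/mm.h>
    #include <linux/overflow.h>

    struct frob_table {                 /* hypothetical trailing-array struct */
            int count;
            unsigned long entries[];    /* flexible array member */
    };

    static struct frob_table *frob_table_alloc(int count)
    {
            struct frob_table *t;

            /*
             * struct_size(t, entries, count) computes
             * sizeof(*t) + count * sizeof(t->entries[0]) with overflow
             * checking, replacing the open-coded sum-plus-multiply.
             */
            t = kzalloc(struct_size(t, entries, count), GFP_KERNEL);
            if (t)
                    t->count = count;
            return t;
    }

    static unsigned long *big_map_alloc(size_t nelems)
    {
            /*
             * kvcalloc() is the zeroing, overflow-checked member of the
             * kvmalloc() family: it tries kmalloc() first and falls back to
             * vmalloc() when the request is too large for the slab allocator.
             */
            return kvcalloc(nelems, sizeof(unsigned long), GFP_KERNEL);
    }
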
commit b08fc5277a
Linus Torvalds  2018-06-12 18:28:00 -07:00
1201 changed files with 3586 additions and 2796 deletions


@ -286,7 +286,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
return -EINVAL;
if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
return -EFAULT;
kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
fs = get_fs();
@ -324,7 +324,7 @@ asmlinkage long sys_oabi_semtimedop(int semid,
return -EINVAL;
if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
return -EFAULT;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
sops = kmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
if (!sops)
return -ENOMEM;
err = 0;


@ -252,7 +252,7 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys)
if (nr || !footbridge_cfn_mode())
return 0;
res = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL);
res = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
if (!res) {
printk("out of memory for root bus resources");
return 0;


@ -421,7 +421,7 @@ int ixp4xx_setup(int nr, struct pci_sys_data *sys)
if (nr >= 1)
return 0;
res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
res = kcalloc(2, sizeof(*res), GFP_KERNEL);
if (res == NULL) {
/*
* If we're out of memory this early, something is wrong,


@ -389,7 +389,7 @@ static void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
{
int i;
omap_mcbsp_devices = kzalloc(size * sizeof(struct platform_device *),
omap_mcbsp_devices = kcalloc(size, sizeof(struct platform_device *),
GFP_KERNEL);
if (!omap_mcbsp_devices) {
printk(KERN_ERR "Could not register McBSP devices\n");


@ -34,7 +34,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
{
char *hc_name;
hc_name = kzalloc(sizeof(char) * (HSMMC_NAME_LEN + 1), GFP_KERNEL);
hc_name = kzalloc(HSMMC_NAME_LEN + 1, GFP_KERNEL);
if (!hc_name) {
kfree(hc_name);
return -ENOMEM;


@ -162,7 +162,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
!omap_hwmod_parse_module_range(NULL, node, &res))
return -ENODEV;
hwmods = kzalloc(sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL);
hwmods = kcalloc(oh_cnt, sizeof(struct omap_hwmod *), GFP_KERNEL);
if (!hwmods) {
ret = -ENOMEM;
goto odbfd_exit;
@ -413,7 +413,7 @@ omap_device_copy_resources(struct omap_hwmod *oh,
goto error;
}
res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
res = kcalloc(2, sizeof(*res), GFP_KERNEL);
if (!res)
return -ENOMEM;


@ -285,10 +285,11 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
prcm_irq_setup = irq_setup;
prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
GFP_KERNEL);
prcm_irq_chips = kcalloc(nr_regs, sizeof(void *), GFP_KERNEL);
prcm_irq_setup->saved_mask = kcalloc(nr_regs, sizeof(u32),
GFP_KERNEL);
prcm_irq_setup->priority_mask = kcalloc(nr_regs, sizeof(u32),
GFP_KERNEL);
if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
!prcm_irq_setup->priority_mask)


@ -403,7 +403,7 @@ static int ve_spc_populate_opps(uint32_t cluster)
uint32_t data = 0, off, ret, idx;
struct ve_spc_opp *opps;
opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL);
opps = kcalloc(MAX_OPPS, sizeof(*opps), GFP_KERNEL);
if (!opps)
return -ENOMEM;


@ -2162,8 +2162,8 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
goto err;
mapping->bitmap_size = bitmap_size;
mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
GFP_KERNEL);
mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
GFP_KERNEL);
if (!mapping->bitmaps)
goto err2;


@ -20,7 +20,7 @@
#include "mm.h"
#ifdef CONFIG_ARM_LPAE
#define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
#define __pgd_alloc() kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd) kfree(pgd)
#else
#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2)


@ -766,8 +766,9 @@ static int coverage_start_fn(const struct decode_header *h, void *args)
static int coverage_start(const union decode_item *table)
{
coverage.base = kmalloc(MAX_COVERAGE_ENTRIES *
sizeof(struct coverage_entry), GFP_KERNEL);
coverage.base = kmalloc_array(MAX_COVERAGE_ENTRIES,
sizeof(struct coverage_entry),
GFP_KERNEL);
coverage.num_entries = 0;
coverage.nesting = 0;
return table_iter(table, coverage_start_fn, &coverage);


@ -234,8 +234,8 @@ static void __init register_insn_emulation_sysctl(void)
struct insn_emulation *insn;
struct ctl_table *insns_sysctl, *sysctl;
insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1),
GFP_KERNEL);
insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
GFP_KERNEL);
raw_spin_lock_irqsave(&insn_emulation_lock, flags);
list_for_each_entry(insn, &insn_emulation, node) {


@ -263,7 +263,7 @@ static int asids_init(void)
*/
WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
GFP_KERNEL);
if (!asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",


@ -350,7 +350,8 @@ init_record_index_pools(void)
/* - 3 - */
slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
slidx_pool.buffer =
kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
kmalloc_array(slidx_pool.max_idx, sizeof(slidx_list_t),
GFP_KERNEL);
return slidx_pool.buffer ? 0 : -ENOMEM;
}


@ -85,7 +85,7 @@ static int __init topology_init(void)
}
#endif
sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL);
if (!sysfs_cpus)
panic("kzalloc in topology_init failed - NR_CPUS too big?");
@ -319,8 +319,8 @@ static int cpu_cache_sysfs_init(unsigned int cpu)
return -1;
}
this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
GFP_KERNEL);
this_cache=kcalloc(unique_caches, sizeof(struct cache_info),
GFP_KERNEL);
if (this_cache == NULL)
return -ENOMEM;


@ -430,8 +430,9 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
int cpu = smp_processor_id();
if (!ia64_idtrs[cpu]) {
ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
sizeof (struct ia64_tr_entry), GFP_KERNEL);
ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX,
sizeof(struct ia64_tr_entry),
GFP_KERNEL);
if (!ia64_idtrs[cpu])
return -ENOMEM;
}


@ -132,7 +132,7 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
printk_once(KERN_WARNING
"PROM version < 4.50 -- implementing old PROM flush WAR\n");
war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
war_list = kcalloc(DEV_PER_WIDGET, sizeof(*war_list), GFP_KERNEL);
BUG_ON(!war_list);
SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,


@ -474,7 +474,8 @@ void __init sn_irq_lh_init(void)
{
int i;
sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
sn_irq_lh = kmalloc_array(NR_IRQS, sizeof(struct list_head *),
GFP_KERNEL);
if (!sn_irq_lh)
panic("SN PCI INIT: Failed to allocate memory for PCI init\n");


@ -184,7 +184,7 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
/* Setup the PMU ATE map */
soft->pbi_int_ate_resource.lowest_free_index = 0;
soft->pbi_int_ate_resource.ate =
kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
kcalloc(soft->pbi_int_ate_size, sizeof(u64), GFP_KERNEL);
if (!soft->pbi_int_ate_resource.ate) {
kfree(soft);


@ -985,7 +985,7 @@ static int __init alchemy_clk_setup_imux(int ctype)
return -ENODEV;
}
a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
a = kcalloc(6, sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;


@ -411,8 +411,8 @@ u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
* and if we try that first we are likely to not waste larger
* slabs of memory.
*/
desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
GFP_KERNEL|GFP_DMA);
desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t),
GFP_KERNEL|GFP_DMA);
if (desc_base == 0)
return 0;
@ -1050,7 +1050,7 @@ static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable)
{
int ret;
dbdev_tab = kzalloc(sizeof(dbdev_tab_t) * DBDEV_TAB_SIZE, GFP_KERNEL);
dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL);
if (!dbdev_tab)
return -ENOMEM;


@ -115,7 +115,7 @@ static void __init alchemy_setup_uarts(int ctype)
uartclk = clk_get_rate(clk);
clk_put(clk);
ports = kzalloc(s * (c + 1), GFP_KERNEL);
ports = kcalloc(s, (c + 1), GFP_KERNEL);
if (!ports) {
printk(KERN_INFO "Alchemy: no memory for UART data\n");
return;
@ -198,7 +198,7 @@ static unsigned long alchemy_ehci_data[][2] __initdata = {
static int __init _new_usbres(struct resource **r, struct platform_device **d)
{
*r = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL);
*r = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
if (!*r)
return -ENOMEM;
*d = kzalloc(sizeof(struct platform_device), GFP_KERNEL);


@ -103,7 +103,7 @@ int __init db1x_register_pcmcia_socket(phys_addr_t pcmcia_attr_start,
if (stschg_irq)
cnt++;
sr = kzalloc(sizeof(struct resource) * cnt, GFP_KERNEL);
sr = kcalloc(cnt, sizeof(struct resource), GFP_KERNEL);
if (!sr)
return -ENOMEM;
@ -178,7 +178,7 @@ int __init db1x_register_norflash(unsigned long size, int width,
return -EINVAL;
ret = -ENOMEM;
parts = kzalloc(sizeof(struct mtd_partition) * 5, GFP_KERNEL);
parts = kcalloc(5, sizeof(struct mtd_partition), GFP_KERNEL);
if (!parts)
goto out;


@ -94,7 +94,7 @@ static int __init bmips_init_dma_ranges(void)
goto out_bad;
/* add a dummy (zero) entry at the end as a sentinel */
bmips_dma_ranges = kzalloc(sizeof(struct bmips_dma_range) * (len + 1),
bmips_dma_ranges = kcalloc(len + 1, sizeof(struct bmips_dma_range),
GFP_KERNEL);
if (!bmips_dma_ranges)
goto out_bad;


@ -219,7 +219,7 @@ static int __init rbtx4939_led_probe(struct platform_device *pdev)
"nand-disk",
};
leds_data = kzalloc(sizeof(*leds_data) * RBTX4939_MAX_7SEGLEDS,
leds_data = kcalloc(RBTX4939_MAX_7SEGLEDS, sizeof(*leds_data),
GFP_KERNEL);
if (!leds_data)
return -ENOMEM;


@ -559,7 +559,8 @@ static int __init rtas_event_scan_init(void)
rtas_error_log_max = rtas_get_error_log_max();
rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int);
rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER);
rtas_log_buf = vmalloc(array_size(LOG_NUMBER,
rtas_error_log_buffer_max));
if (!rtas_log_buf) {
printk(KERN_ERR "rtasd: no memory\n");
return -ENOMEM;


@ -791,7 +791,7 @@ static int __init vdso_init(void)
#ifdef CONFIG_VDSO32
/* Make sure pages are in the correct state */
vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 2),
vdso32_pagelist = kcalloc(vdso32_pages + 2, sizeof(struct page *),
GFP_KERNEL);
BUG_ON(vdso32_pagelist == NULL);
for (i = 0; i < vdso32_pages; i++) {
@ -805,7 +805,7 @@ static int __init vdso_init(void)
#endif
#ifdef CONFIG_PPC64
vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 2),
vdso64_pagelist = kcalloc(vdso64_pages + 2, sizeof(struct page *),
GFP_KERNEL);
BUG_ON(vdso64_pagelist == NULL);
for (i = 0; i < vdso64_pages; i++) {


@ -108,7 +108,7 @@ int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
npte = 1ul << (order - 4);
/* Allocate reverse map array */
rev = vmalloc(sizeof(struct revmap_entry) * npte);
rev = vmalloc(array_size(npte, sizeof(struct revmap_entry)));
if (!rev) {
if (cma)
kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));


@ -3548,7 +3548,7 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
unsigned long npages)
{
slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
slot->arch.rmap = vzalloc(array_size(npages, sizeof(*slot->arch.rmap)));
if (!slot->arch.rmap)
return -ENOMEM;


@ -54,7 +54,7 @@ static int grow(rh_info_t * info, int max_blocks)
new_blocks = max_blocks - info->max_blocks;
block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC);
block = kmalloc_array(max_blocks, sizeof(rh_block_t), GFP_ATOMIC);
if (block == NULL)
return -ENOMEM;


@ -159,7 +159,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
goto unlock_exit;
}
mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
if (!mem->hpas) {
kfree(mem);
ret = -ENOMEM;


@ -1316,7 +1316,7 @@ int numa_update_cpu_topology(bool cpus_locked)
if (!weight)
return 0;
updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
if (!updates)
return 0;


@ -566,7 +566,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
if (!bpf_jit_enable)
return;
addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;


@ -949,7 +949,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
goto skip_init_ctx;
}
addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL) {
fp = org_fp;
goto out_addrs;


@ -210,8 +210,8 @@ int start_spu_profiling_cycles(unsigned int cycles_reset)
timer.function = profile_spus;
/* Allocate arrays for collecting SPU PC samples */
samples = kzalloc(SPUS_PER_NODE *
TRACE_ARRAY_SIZE * sizeof(u32), GFP_KERNEL);
samples = kcalloc(SPUS_PER_NODE * TRACE_ARRAY_SIZE, sizeof(u32),
GFP_KERNEL);
if (!samples)
return -ENOMEM;


@ -156,7 +156,8 @@ static int hsta_msi_probe(struct platform_device *pdev)
if (ret)
goto out;
ppc4xx_hsta_msi.irq_map = kmalloc(sizeof(int) * irq_count, GFP_KERNEL);
ppc4xx_hsta_msi.irq_map = kmalloc_array(irq_count, sizeof(int),
GFP_KERNEL);
if (!ppc4xx_hsta_msi.irq_map) {
ret = -ENOMEM;
goto out1;


@ -89,7 +89,7 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (type == PCI_CAP_ID_MSIX)
pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int), GFP_KERNEL);
msi_data->msi_virqs = kmalloc_array(msi_irqs, sizeof(int), GFP_KERNEL);
if (!msi_data->msi_virqs)
return -ENOMEM;


@ -1449,7 +1449,7 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
count = ppc4xx_pciex_hwops->core_init(np);
if (count > 0) {
ppc4xx_pciex_ports =
kzalloc(count * sizeof(struct ppc4xx_pciex_port),
kcalloc(count, sizeof(struct ppc4xx_pciex_port),
GFP_KERNEL);
if (ppc4xx_pciex_ports) {
ppc4xx_pciex_port_count = count;


@ -198,21 +198,21 @@ void __init opal_sys_param_init(void)
goto out_param_buf;
}
id = kzalloc(sizeof(*id) * count, GFP_KERNEL);
id = kcalloc(count, sizeof(*id), GFP_KERNEL);
if (!id) {
pr_err("SYSPARAM: Failed to allocate memory to read parameter "
"id\n");
goto out_param_buf;
}
size = kzalloc(sizeof(*size) * count, GFP_KERNEL);
size = kcalloc(count, sizeof(*size), GFP_KERNEL);
if (!size) {
pr_err("SYSPARAM: Failed to allocate memory to read parameter "
"size\n");
goto out_free_id;
}
perm = kzalloc(sizeof(*perm) * count, GFP_KERNEL);
perm = kcalloc(count, sizeof(*perm), GFP_KERNEL);
if (!perm) {
pr_err("SYSPARAM: Failed to allocate memory to read supported "
"action on the parameter");
@ -235,7 +235,7 @@ void __init opal_sys_param_init(void)
goto out_free_perm;
}
attr = kzalloc(sizeof(*attr) * count, GFP_KERNEL);
attr = kcalloc(count, sizeof(*attr), GFP_KERNEL);
if (!attr) {
pr_err("SYSPARAM: Failed to allocate memory for parameter "
"attributes\n");


@ -544,7 +544,7 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
/* Allocate fixups array */
mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL);
mpic->fixups = kcalloc(128, sizeof(*mpic->fixups), GFP_KERNEL);
BUG_ON(mpic->fixups == NULL);
/* Init spinlock */
@ -1324,7 +1324,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
if (psrc) {
/* Allocate a bitmap with one bit per interrupt */
unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1);
mpic->protected = kzalloc(mapsize*sizeof(long), GFP_KERNEL);
mpic->protected = kcalloc(mapsize, sizeof(long), GFP_KERNEL);
BUG_ON(mpic->protected == NULL);
for (i = 0; i < psize/sizeof(u32); i++) {
if (psrc[i] > intvec_top)
@ -1639,8 +1639,9 @@ void __init mpic_init(struct mpic *mpic)
#ifdef CONFIG_PM
/* allocate memory to save mpic state */
mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data),
GFP_KERNEL);
mpic->save_data = kmalloc_array(mpic->num_sources,
sizeof(*mpic->save_data),
GFP_KERNEL);
BUG_ON(mpic->save_data == NULL);
#endif


@ -489,7 +489,7 @@ static bool xive_parse_provisioning(struct device_node *np)
if (rc == 0)
return true;
xive_provision_chips = kzalloc(4 * xive_provision_chip_count,
xive_provision_chips = kcalloc(4, xive_provision_chip_count,
GFP_KERNEL);
if (WARN_ON(!xive_provision_chips))
return false;


@ -391,7 +391,7 @@ int appldata_register_ops(struct appldata_ops *ops)
if (ops->size > APPLDATA_MAX_REC_SIZE)
return -EINVAL;
ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
ops->ctl_table = kcalloc(4, sizeof(struct ctl_table), GFP_KERNEL);
if (!ops->ctl_table)
return -ENOMEM;


@ -239,7 +239,7 @@ static void *page_align_ptr(void *ptr)
static void *diag204_alloc_vbuf(int pages)
{
/* The buffer has to be page aligned! */
diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1));
diag204_buf_vmalloc = vmalloc(array_size(PAGE_SIZE, (pages + 1)));
if (!diag204_buf_vmalloc)
return ERR_PTR(-ENOMEM);
diag204_buf = page_align_ptr(diag204_buf_vmalloc);


@ -49,7 +49,8 @@ static void *diag0c_store(unsigned int *count)
get_online_cpus();
cpu_count = num_online_cpus();
cpu_vec = kmalloc(sizeof(*cpu_vec) * num_possible_cpus(), GFP_KERNEL);
cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec),
GFP_KERNEL);
if (!cpu_vec)
goto fail_put_online_cpus;
/* Note: Diag 0c needs 8 byte alignment and real storage */


@ -194,11 +194,13 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas)
debug_entry_t ***areas;
int i, j;
areas = kmalloc(nr_areas * sizeof(debug_entry_t **), GFP_KERNEL);
areas = kmalloc_array(nr_areas, sizeof(debug_entry_t **), GFP_KERNEL);
if (!areas)
goto fail_malloc_areas;
for (i = 0; i < nr_areas; i++) {
areas[i] = kmalloc(pages_per_area * sizeof(debug_entry_t *), GFP_KERNEL);
areas[i] = kmalloc_array(pages_per_area,
sizeof(debug_entry_t *),
GFP_KERNEL);
if (!areas[i])
goto fail_malloc_areas2;
for (j = 0; j < pages_per_area; j++) {


@ -123,8 +123,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
/* Allocate one syminfo structure per symbol. */
me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
me->arch.syminfo = vmalloc(me->arch.nsyms *
sizeof(struct mod_arch_syminfo));
me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo),
me->arch.nsyms));
if (!me->arch.syminfo)
return -ENOMEM;
symbols = (void *) hdr + symtab->sh_offset;


@ -527,7 +527,7 @@ static __init struct attribute **merge_attr(struct attribute **a,
j++;
j++;
new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL);
if (!new)
return NULL;
j = 0;


@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
if (pages <= 0)
return;
diag204_buf = vmalloc(PAGE_SIZE * pages);
diag204_buf = vmalloc(array_size(pages, PAGE_SIZE));
if (!diag204_buf)
return;


@ -285,7 +285,7 @@ static int __init vdso_init(void)
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
/* Make sure pages are in the correct state */
vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
GFP_KERNEL);
BUG_ON(vdso32_pagelist == NULL);
for (i = 0; i < vdso32_pages - 1; i++) {
@ -303,7 +303,7 @@ static int __init vdso_init(void)
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
/* Make sure pages are in the correct state */
vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
GFP_KERNEL);
BUG_ON(vdso64_pagelist == NULL);
for (i = 0; i < vdso64_pages - 1; i++) {


@ -847,7 +847,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
pages = pages_array;
if (nr_pages > ARRAY_SIZE(pages_array))
pages = vmalloc(nr_pages * sizeof(unsigned long));
pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
if (!pages)
return -ENOMEM;
need_ipte_lock = psw_bits(*psw).dat && !asce.r;


@ -1729,7 +1729,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
if (args->count == 0)
return 0;
bits = vmalloc(sizeof(*bits) * args->count);
bits = vmalloc(array_size(sizeof(*bits), args->count));
if (!bits)
return -ENOMEM;


@ -103,7 +103,7 @@ static int scode_set;
static int
dcss_set_subcodes(void)
{
char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
char *name = kmalloc(8, GFP_KERNEL | GFP_DMA);
unsigned long rx, ry;
int rc;


@ -154,7 +154,7 @@ static int __init dmabrg_init(void)
unsigned long or;
int ret;
dmabrg_handlers = kzalloc(10 * sizeof(struct dmabrg_handler),
dmabrg_handlers = kcalloc(10, sizeof(struct dmabrg_handler),
GFP_KERNEL);
if (!dmabrg_handlers)
return -ENOMEM;


@ -561,7 +561,7 @@ static int __init sh7786_pcie_init(void)
if (unlikely(nr_ports == 0))
return -ENODEV;
sh7786_pcie_ports = kzalloc(nr_ports * sizeof(struct sh7786_pcie_port),
sh7786_pcie_ports = kcalloc(nr_ports, sizeof(struct sh7786_pcie_port),
GFP_KERNEL);
if (unlikely(!sh7786_pcie_ports))
return -ENOMEM;


@ -166,7 +166,8 @@ static int __init check_nmi_watchdog(void)
if (!atomic_read(&nmi_active))
return 0;
prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
prev_nmi_count = kmalloc_array(nr_cpu_ids, sizeof(unsigned int),
GFP_KERNEL);
if (!prev_nmi_count) {
err = -ENOMEM;
goto error;


@ -565,7 +565,8 @@ SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
}
if (!current_thread_info()->utraps) {
current_thread_info()->utraps =
kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long),
GFP_KERNEL);
if (!current_thread_info()->utraps)
return -ENOMEM;
current_thread_info()->utraps[0] = 1;
@ -575,8 +576,9 @@ SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
unsigned long *p = current_thread_info()->utraps;
current_thread_info()->utraps =
kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
GFP_KERNEL);
kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1,
sizeof(long),
GFP_KERNEL);
if (!current_thread_info()->utraps) {
current_thread_info()->utraps = p;
return -ENOMEM;


@ -335,7 +335,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
if (!bpf_jit_enable)
return;
addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
addrs = kmalloc_array(flen, sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;


@ -1127,9 +1127,9 @@ static int __init ubd_init(void)
return -1;
}
irq_req_buffer = kmalloc(
sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE,
GFP_KERNEL
irq_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
sizeof(struct io_thread_req *),
GFP_KERNEL
);
irq_remainder = 0;
@ -1137,9 +1137,9 @@ static int __init ubd_init(void)
printk(KERN_ERR "Failed to initialize ubd buffering\n");
return -1;
}
io_req_buffer = kmalloc(
sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE,
GFP_KERNEL
io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
sizeof(struct io_thread_req *),
GFP_KERNEL
);
io_remainder = 0;


@ -527,14 +527,14 @@ static struct vector_queue *create_queue(
result->max_iov_frags = num_extra_frags;
for (i = 0; i < max_size; i++) {
if (vp->header_size > 0)
iov = kmalloc(
sizeof(struct iovec) * (3 + num_extra_frags),
GFP_KERNEL
iov = kmalloc_array(3 + num_extra_frags,
sizeof(struct iovec),
GFP_KERNEL
);
else
iov = kmalloc(
sizeof(struct iovec) * (2 + num_extra_frags),
GFP_KERNEL
iov = kmalloc_array(2 + num_extra_frags,
sizeof(struct iovec),
GFP_KERNEL
);
if (iov == NULL)
goto out_fail;


@ -109,8 +109,9 @@ static int __init puv3_pm_init(void)
return -EINVAL;
}
sleep_save = kmalloc(puv3_cpu_pm_fns->save_count
* sizeof(unsigned long), GFP_KERNEL);
sleep_save = kmalloc_array(puv3_cpu_pm_fns->save_count,
sizeof(unsigned long),
GFP_KERNEL);
if (!sleep_save) {
printk(KERN_ERR "failed to alloc memory for pm save\n");
return -ENOMEM;


@ -387,7 +387,7 @@ static __init int _init_events_attrs(void)
while (amd_iommu_v2_event_descs[i].attr.attr.name)
i++;
attrs = kzalloc(sizeof(struct attribute **) * (i + 1), GFP_KERNEL);
attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
if (!attrs)
return -ENOMEM;


@ -1637,7 +1637,7 @@ __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
j++;
j++;
new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL);
if (!new)
return NULL;


@ -865,12 +865,10 @@ static void uncore_types_exit(struct intel_uncore_type **types)
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
struct intel_uncore_pmu *pmus;
struct attribute_group *attr_group;
struct attribute **attrs;
size_t size;
int i, j;
pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
if (!pmus)
return -ENOMEM;
@ -891,21 +889,24 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
0, type->num_counters, 0, 0);
if (type->event_descs) {
struct {
struct attribute_group group;
struct attribute *attrs[];
} *attr_group;
for (i = 0; type->event_descs[i].attr.attr.name; i++);
attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
sizeof(*attr_group), GFP_KERNEL);
attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
GFP_KERNEL);
if (!attr_group)
goto err;
attrs = (struct attribute **)(attr_group + 1);
attr_group->name = "events";
attr_group->attrs = attrs;
attr_group->group.name = "events";
attr_group->group.attrs = attr_group->attrs;
for (j = 0; j < i; j++)
attrs[j] = &type->event_descs[j].attr.attr;
attr_group->attrs[j] = &type->event_descs[j].attr.attr;
type->events_group = attr_group;
type->events_group = &attr_group->group;
}
type->pmu_group = &uncore_pmu_attr_group;


@ -1457,7 +1457,7 @@ static int __mcheck_cpu_mce_banks_init(void)
int i;
u8 num_banks = mca_cfg.banks;
mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
if (!mce_banks)
return -ENOMEM;


@ -1384,7 +1384,7 @@ int mce_threshold_create_device(unsigned int cpu)
if (bp)
return 0;
bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *),
GFP_KERNEL);
if (!bp)
return -ENOMEM;


@ -43,7 +43,7 @@ mtrr_file_add(unsigned long base, unsigned long size,
max = num_var_ranges;
if (fcount == NULL) {
fcount = kzalloc(max * sizeof *fcount, GFP_KERNEL);
fcount = kcalloc(max, sizeof(*fcount), GFP_KERNEL);
if (!fcount)
return -ENOMEM;
FILE_FCOUNT(file) = fcount;


@ -610,7 +610,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
if (!hpet_domain)
return;
hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
hpet_devs = kcalloc(num_timers, sizeof(struct hpet_dev), GFP_KERNEL);
if (!hpet_devs)
return;
@ -966,8 +966,8 @@ int __init hpet_enable(void)
#endif
cfg = hpet_readl(HPET_CFG);
hpet_boot_cfg = kmalloc((last + 2) * sizeof(*hpet_boot_cfg),
GFP_KERNEL);
hpet_boot_cfg = kmalloc_array(last + 2, sizeof(*hpet_boot_cfg),
GFP_KERNEL);
if (hpet_boot_cfg)
*hpet_boot_cfg = cfg;
else


@ -283,7 +283,7 @@ static int __init create_setup_data_nodes(struct kobject *parent)
if (ret)
goto out_setup_data_kobj;
kobjp = kmalloc(sizeof(*kobjp) * nr, GFP_KERNEL);
kobjp = kmalloc_array(nr, sizeof(*kobjp), GFP_KERNEL);
if (!kobjp) {
ret = -ENOMEM;
goto out_setup_data_kobj;


@ -203,8 +203,9 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
goto out;
r = -ENOMEM;
if (cpuid->nent) {
cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
cpuid->nent);
cpuid_entries =
vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
cpuid->nent));
if (!cpuid_entries)
goto out;
r = -EFAULT;
@ -785,7 +786,8 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
return -EINVAL;
r = -ENOMEM;
cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
cpuid->nent));
if (!cpuid_entries)
goto out;


@ -40,8 +40,9 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
int i;
for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
slot->arch.gfn_track[i] = kvzalloc(npages *
sizeof(*slot->arch.gfn_track[i]), GFP_KERNEL);
slot->arch.gfn_track[i] =
kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
GFP_KERNEL);
if (!slot->arch.gfn_track[i])
goto track_free;
}


@ -1001,7 +1001,9 @@ static int svm_cpu_init(int cpu)
if (svm_sev_enabled()) {
r = -ENOMEM;
sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL);
sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
sizeof(void *),
GFP_KERNEL);
if (!sd->sev_vmcbs)
goto err_1;
}


@ -8900,13 +8900,14 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
slot->base_gfn, level) + 1;
slot->arch.rmap[i] =
kvzalloc(lpages * sizeof(*slot->arch.rmap[i]), GFP_KERNEL);
kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
GFP_KERNEL);
if (!slot->arch.rmap[i])
goto out_free;
if (i == 0)
continue;
linfo = kvzalloc(lpages * sizeof(*linfo), GFP_KERNEL);
linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL);
if (!linfo)
goto out_free;


@ -1107,7 +1107,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
extra_pass = true;
goto skip_init_addrs;
}
addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out_addrs;


@ -2345,7 +2345,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}
addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out;


@ -168,7 +168,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL);
if (!v)
return -ENOMEM;


@ -2142,7 +2142,7 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
timeout_us = calculate_destination_timeout();
vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
vp = kmalloc_array(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
uvhub_descs = (struct uvhub_desc *)vp;
memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);


@ -158,7 +158,7 @@ static __init int uv_rtc_allocate_timers(void)
{
int cpu;
blade_info = kzalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL);
blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL);
if (!blade_info)
return -ENOMEM;


@ -2091,7 +2091,8 @@ static int __init init_bio(void)
{
bio_slab_max = 2;
bio_slab_nr = 0;
bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
GFP_KERNEL);
if (!bio_slabs)
panic("bio: can't allocate bios\n");


@ -1903,7 +1903,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
if (!tags)
return NULL;
tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
node);
if (!tags->rqs) {
@ -1911,9 +1911,9 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
return NULL;
}
tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
node);
tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
node);
if (!tags->static_rqs) {
kfree(tags->rqs);
blk_mq_free_tags(tags);
@ -2522,7 +2522,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
/* init q->mq_kobj and sw queues' kobjects */
blk_mq_sysfs_init(q);
q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
GFP_KERNEL, set->numa_node);
if (!q->queue_hw_ctx)
goto err_percpu;
@ -2741,14 +2741,14 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->nr_hw_queues > nr_cpu_ids)
set->nr_hw_queues = nr_cpu_ids;
set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
if (!set->tags)
return -ENOMEM;
ret = -ENOMEM;
set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
GFP_KERNEL, set->numa_node);
set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
GFP_KERNEL, set->numa_node);
if (!set->mq_map)
goto out_free_tags;


@ -99,12 +99,12 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
__func__, depth);
}
tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC);
if (!tag_index)
goto fail;
nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
if (!tag_map)
goto fail;


@ -331,8 +331,8 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone))
return -ERANGE;
zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone),
GFP_KERNEL | __GFP_ZERO);
zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
GFP_KERNEL | __GFP_ZERO);
if (!zones)
return -ENOMEM;


@ -122,7 +122,7 @@ static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
return NULL;
nr = disk_max_parts(hd);
state->parts = vzalloc(nr * sizeof(state->parts[0]));
state->parts = vzalloc(array_size(nr, sizeof(state->parts[0])));
if (!state->parts) {
kfree(state);
return NULL;


@ -378,7 +378,7 @@ static bool ldm_validate_tocblocks(struct parsed_partitions *state,
BUG_ON(!state || !ldb);
ph = &ldb->ph;
tb[0] = &ldb->toc;
tb[1] = kmalloc(sizeof(*tb[1]) * 3, GFP_KERNEL);
tb[1] = kmalloc_array(3, sizeof(*tb[1]), GFP_KERNEL);
if (!tb[1]) {
ldm_crit("Out of memory.");
goto err;


@ -255,8 +255,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
processed - as);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) *
areq->tsgl_entries,
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
areq->tsgl_entries),
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;


@ -100,7 +100,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
areq->tsgl_entries),
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;


@ -603,7 +603,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out_nooutbuf;
/* avoid "the frame size is larger than 1024 bytes" compiler warning */
sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 4 : 2), GFP_KERNEL);
sg = kmalloc(array3_size(sizeof(*sg), 8, (diff_dst ? 4 : 2)),
GFP_KERNEL);
if (!sg)
goto out_nosg;
sgout = &sg[16];


@ -82,7 +82,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
if (count < 0) {
return NULL;
} else if (count > 0) {
resources = kzalloc(count * sizeof(struct resource),
resources = kcalloc(count, sizeof(struct resource),
GFP_KERNEL);
if (!resources) {
dev_err(&adev->dev, "No memory for resources\n");


@ -832,8 +832,9 @@ int acpi_video_get_levels(struct acpi_device *device,
* in order to account for buggy BIOS which don't export the first two
* special levels (see below)
*/
br->levels = kmalloc((obj->package.count + ACPI_VIDEO_FIRST_LEVEL) *
sizeof(*br->levels), GFP_KERNEL);
br->levels = kmalloc_array(obj->package.count + ACPI_VIDEO_FIRST_LEVEL,
sizeof(*br->levels),
GFP_KERNEL);
if (!br->levels) {
result = -ENOMEM;
goto out_free;


@ -524,7 +524,8 @@ static int __erst_record_id_cache_add_one(void)
pr_warn(FW_WARN "too many record IDs!\n");
return 0;
}
new_entries = kvmalloc(new_size * sizeof(entries[0]), GFP_KERNEL);
new_entries = kvmalloc_array(new_size, sizeof(entries[0]),
GFP_KERNEL);
if (!new_entries)
return -ENOMEM;
memcpy(new_entries, entries,


@ -195,7 +195,8 @@ static int __init hest_ghes_dev_register(unsigned int ghes_count)
struct ghes_arr ghes_arr;
ghes_arr.count = 0;
ghes_arr.ghes_devs = kmalloc(sizeof(void *) * ghes_count, GFP_KERNEL);
ghes_arr.ghes_devs = kmalloc_array(ghes_count, sizeof(void *),
GFP_KERNEL);
if (!ghes_arr.ghes_devs)
return -ENOMEM;


@ -298,8 +298,8 @@ static int acpi_fan_get_fps(struct acpi_device *device)
}
fan->fps_count = obj->package.count - 1; /* minus revision field */
fan->fps = devm_kzalloc(&device->dev,
fan->fps_count * sizeof(struct acpi_fan_fps),
fan->fps = devm_kcalloc(&device->dev,
fan->fps_count, sizeof(struct acpi_fan_fps),
GFP_KERNEL);
if (!fan->fps) {
dev_err(&device->dev, "Not enough memory\n");


@ -1082,9 +1082,10 @@ static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
continue;
nfit_mem->nfit_flush = nfit_flush;
flush = nfit_flush->flush;
nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
flush->hint_count
* sizeof(struct resource), GFP_KERNEL);
nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
flush->hint_count,
sizeof(struct resource),
GFP_KERNEL);
if (!nfit_mem->flush_wpq)
return -ENOMEM;
for (i = 0; i < flush->hint_count; i++) {


@ -343,8 +343,9 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
pr->performance->state_count = pss->package.count;
pr->performance->states =
kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
GFP_KERNEL);
kmalloc_array(pss->package.count,
sizeof(struct acpi_processor_px),
GFP_KERNEL);
if (!pr->performance->states) {
result = -ENOMEM;
goto end;


@ -534,8 +534,9 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
pr->throttling.state_count = tss->package.count;
pr->throttling.states_tss =
kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
GFP_KERNEL);
kmalloc_array(tss->package.count,
sizeof(struct acpi_processor_tx_tss),
GFP_KERNEL);
if (!pr->throttling.states_tss) {
result = -ENOMEM;
goto end;


@ -857,12 +857,12 @@ void acpi_irq_stats_init(void)
num_gpes = acpi_current_gpe_count;
num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *),
GFP_KERNEL);
if (all_attrs == NULL)
return;
all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
all_counters = kcalloc(num_counters, sizeof(struct event_counter),
GFP_KERNEL);
if (all_counters == NULL)
goto fail;
@ -871,7 +871,7 @@ void acpi_irq_stats_init(void)
if (ACPI_FAILURE(status))
goto fail;
counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute),
GFP_KERNEL);
if (counter_attrs == NULL)
goto fail;


@ -692,8 +692,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
}
}
#endif
alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
((vma->vm_end - vma->vm_start) / PAGE_SIZE),
alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
if (alloc->pages == NULL) {
ret = -ENOMEM;


@ -6987,7 +6987,7 @@ static void __init ata_parse_force_param(void)
if (*p == ',')
size++;
ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
if (!ata_force_tbl) {
printk(KERN_WARNING "ata: failed to extend force table, "
"libata.force ignored\n");


@ -340,7 +340,7 @@ static int sata_pmp_init_links (struct ata_port *ap, int nr_ports)
int i, err;
if (!pmp_link) {
pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS,
pmp_link = kcalloc(SATA_PMP_MAX_PORTS, sizeof(pmp_link[0]),
GFP_NOIO);
if (!pmp_link)
return -ENOMEM;


@ -4114,13 +4114,13 @@ static int mv_platform_probe(struct platform_device *pdev)
if (!host || !hpriv)
return -ENOMEM;
hpriv->port_clks = devm_kzalloc(&pdev->dev,
sizeof(struct clk *) * n_ports,
hpriv->port_clks = devm_kcalloc(&pdev->dev,
n_ports, sizeof(struct clk *),
GFP_KERNEL);
if (!hpriv->port_clks)
return -ENOMEM;
hpriv->port_phys = devm_kzalloc(&pdev->dev,
sizeof(struct phy *) * n_ports,
hpriv->port_phys = devm_kcalloc(&pdev->dev,
n_ports, sizeof(struct phy *),
GFP_KERNEL);
if (!hpriv->port_phys)
return -ENOMEM;


@ -2094,7 +2094,8 @@ static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
/* allocate the array of receive buffers */
buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
GFP_KERNEL);
if (buffer == NULL)
return -ENOMEM;

Some files were not shown because too many files have changed in this diff.