treewide: Use struct_size() for kmalloc()-family
One of the more common cases of allocation size calculations is finding
the size of a structure that has a zero-sized array at the end, along
with memory for some number of elements for that array. For example:

struct foo {
    int stuff;
    void *entry[];
};

instance = kmalloc(sizeof(struct foo) + sizeof(void *) * count, GFP_KERNEL);

Instead of leaving these open-coded and prone to type mistakes, we can
now use the new struct_size() helper:

instance = kmalloc(struct_size(instance, entry, count), GFP_KERNEL);

This patch makes the changes for kmalloc()-family (and kvmalloc()-family)
uses. It was done via automatic conversion with manual review for the
"CHECKME" non-standard cases noted below, using the following Coccinelle
script:

// pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
//                      sizeof *pkey_cache->table, GFP_KERNEL);
@@
identifier alloc =~ "kmalloc|kzalloc|kvmalloc|kvzalloc";
expression GFP;
identifier VAR, ELEMENT;
expression COUNT;
@@

- alloc(sizeof(*VAR) + COUNT * sizeof(*VAR->ELEMENT), GFP)
+ alloc(struct_size(VAR, ELEMENT, COUNT), GFP)

// mr = kzalloc(sizeof(*mr) + m * sizeof(mr->map[0]), GFP_KERNEL);
@@
identifier alloc =~ "kmalloc|kzalloc|kvmalloc|kvzalloc";
expression GFP;
identifier VAR, ELEMENT;
expression COUNT;
@@

- alloc(sizeof(*VAR) + COUNT * sizeof(VAR->ELEMENT[0]), GFP)
+ alloc(struct_size(VAR, ELEMENT, COUNT), GFP)

// Same pattern, but can't trivially locate the trailing element name,
// or variable name.
@@
identifier alloc =~ "kmalloc|kzalloc|kvmalloc|kvzalloc";
expression GFP;
expression SOMETHING, COUNT, ELEMENT;
@@

- alloc(sizeof(SOMETHING) + COUNT * sizeof(ELEMENT), GFP)
+ alloc(CHECKME_struct_size(&SOMETHING, ELEMENT, COUNT), GFP)

Signed-off-by: Kees Cook <keescook@chromium.org>
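For reference, the helper amounts to an overflow-checked multiply-and-add:
struct_size(p, member, n) computes sizeof(*p) + n * sizeof(*p->member) and
saturates to SIZE_MAX on overflow, so the allocation fails instead of
returning an undersized buffer. A minimal userspace sketch of the idea
(hypothetical names, not the kernel's implementation):

#include <stddef.h>
#include <stdint.h>

/* Sketch only: saturate to SIZE_MAX on overflow, the way struct_size()
 * does, so a later malloc()/kmalloc() of the result fails cleanly
 * rather than handing back a short buffer.
 */
static inline size_t sketch_struct_size(size_t base, size_t elem_size, size_t n)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, elem_size, &bytes) ||
	    __builtin_add_overflow(bytes, base, &bytes))
		return SIZE_MAX;
	return bytes;
}

/* e.g. sketch_struct_size(sizeof(struct foo), sizeof(void *), count) */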
Parent: 2509b561f7
Commit: acafe7e302
@@ -197,8 +197,8 @@ void __init iproc_asiu_setup(struct device_node *node,
 	if (WARN_ON(!asiu))
 		return;
 
-	asiu->clk_data = kzalloc(sizeof(*asiu->clk_data->hws) * num_clks +
-				 sizeof(*asiu->clk_data), GFP_KERNEL);
+	asiu->clk_data = kzalloc(struct_size(asiu->clk_data, hws, num_clks),
+				 GFP_KERNEL);
 	if (WARN_ON(!asiu->clk_data))
 		goto err_clks;
 	asiu->clk_data->num = num_clks;
@@ -744,8 +744,7 @@ void iproc_pll_clk_setup(struct device_node *node,
 	if (WARN_ON(!pll))
 		return;
 
-	clk_data = kzalloc(sizeof(*clk_data->hws) * num_clks +
-			   sizeof(*clk_data), GFP_KERNEL);
+	clk_data = kzalloc(struct_size(clk_data, hws, num_clks), GFP_KERNEL);
 	if (WARN_ON(!clk_data))
 		goto err_clk_data;
 	clk_data->num = num_clks;
@@ -509,8 +509,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
 	u8 avpll_flags = 0;
 	int n, ret;
 
-	clk_data = kzalloc(sizeof(*clk_data) +
-			   sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL);
+	clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
 	if (!clk_data)
 		return;
 	clk_data->num = MAX_CLKS;
@@ -295,8 +295,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
 	struct clk_hw **hws;
 	int n, ret;
 
-	clk_data = kzalloc(sizeof(*clk_data) +
-			   sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL);
+	clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
 	if (!clk_data)
 		return;
 	clk_data->num = MAX_CLKS;
@@ -273,8 +273,7 @@ static void __init asm9260_acc_init(struct device_node *np)
 	int n;
 	u32 accuracy = 0;
 
-	clk_data = kzalloc(sizeof(*clk_data) +
-			   sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL);
+	clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
 	if (!clk_data)
 		return;
 	clk_data->num = MAX_CLKS;
@@ -627,9 +627,9 @@ static void __init aspeed_cc_init(struct device_node *np)
 	if (!scu_base)
 		return;
 
-	aspeed_clk_data = kzalloc(sizeof(*aspeed_clk_data) +
-				  sizeof(*aspeed_clk_data->hws) * ASPEED_NUM_CLKS,
-				  GFP_KERNEL);
+	aspeed_clk_data = kzalloc(struct_size(aspeed_clk_data, hws,
+					      ASPEED_NUM_CLKS),
+				  GFP_KERNEL);
 	if (!aspeed_clk_data)
 		return;
 
@@ -54,9 +54,9 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
 	if (!base)
 		return ERR_PTR(-ENOMEM);
 
-	clps711x_clk = kzalloc(sizeof(*clps711x_clk) +
-			       sizeof(*clps711x_clk->clk_data.hws) * CLPS711X_CLK_MAX,
-			       GFP_KERNEL);
+	clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws,
+					   CLPS711X_CLK_MAX),
+			       GFP_KERNEL);
 	if (!clps711x_clk)
 		return ERR_PTR(-ENOMEM);
 
@@ -25,8 +25,8 @@ static void __init efm32gg_cmu_init(struct device_node *np)
 	void __iomem *base;
 	struct clk_hw **hws;
 
-	clk_data = kzalloc(sizeof(*clk_data) +
-			   sizeof(*clk_data->hws) * CMU_MAX_CLKS, GFP_KERNEL);
+	clk_data = kzalloc(struct_size(clk_data, hws, CMU_MAX_CLKS),
+			   GFP_KERNEL);
 
 	if (!clk_data)
 		return;
@@ -399,9 +399,9 @@ static void __init gemini_cc_init(struct device_node *np)
 	int ret;
 	int i;
 
-	gemini_clk_data = kzalloc(sizeof(*gemini_clk_data) +
-				  sizeof(*gemini_clk_data->hws) * GEMINI_NUM_CLKS,
-				  GFP_KERNEL);
+	gemini_clk_data = kzalloc(struct_size(gemini_clk_data, hws,
+					      GEMINI_NUM_CLKS),
+				  GFP_KERNEL);
 	if (!gemini_clk_data)
 		return;
 
@@ -1201,9 +1201,8 @@ static void __init stm32h7_rcc_init(struct device_node *np)
 	const char *hse_clk, *lse_clk, *i2s_clk;
 	struct regmap *pdrm;
 
-	clk_data = kzalloc(sizeof(*clk_data) +
-			   sizeof(*clk_data->hws) * STM32H7_MAX_CLKS,
-			   GFP_KERNEL);
+	clk_data = kzalloc(struct_size(clk_data, hws, STM32H7_MAX_CLKS),
+			   GFP_KERNEL);
 	if (!clk_data)
 		return;
 
@@ -2060,9 +2060,8 @@ static int stm32_rcc_init(struct device_node *np,
 
 	max_binding = data->maxbinding;
 
-	clk_data = kzalloc(sizeof(*clk_data) +
-			   sizeof(*clk_data->hws) * max_binding,
-			   GFP_KERNEL);
+	clk_data = kzalloc(struct_size(clk_data, hws, max_binding),
+			   GFP_KERNEL);
 	if (!clk_data)
 		return -ENOMEM;
 
@@ -61,8 +61,7 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
 	int ret;
 	int i;
 
-	clkout = kzalloc(sizeof(*clkout) +
-			 sizeof(*clkout->data.hws) * EXYNOS_CLKOUT_NR_CLKS,
+	clkout = kzalloc(struct_size(clkout, data.hws, EXYNOS_CLKOUT_NR_CLKS),
 			 GFP_KERNEL);
 	if (!clkout)
 		return;
@@ -594,7 +594,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	if (!count)
 		return ERR_PTR(-EINVAL);
 
-	dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
+	dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL);
 	if (!dev_dax)
 		return ERR_PTR(-ENOMEM);
 
@@ -1074,8 +1074,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		return NULL;
 	}
 
-	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
-			GFP_ATOMIC);
+	edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC);
 	if (!edesc)
 		return NULL;
 
@@ -1192,8 +1191,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 		nslots = 2;
 	}
 
-	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
-			GFP_ATOMIC);
+	edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
 	if (!edesc)
 		return NULL;
 
@@ -1315,8 +1313,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		}
 	}
 
-	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
-			GFP_ATOMIC);
+	edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
 	if (!edesc)
 		return NULL;
 
@@ -309,7 +309,7 @@ static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
 		return NULL;
 	}
 
-	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+	d = kzalloc(struct_size(d, sg, sg_len), GFP_ATOMIC);
 	if (!d)
 		return NULL;
 
@@ -917,7 +917,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 	}
 
 	/* Now allocate and setup the descriptor. */
-	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
+	d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
 	if (!d)
 		return NULL;
 
@@ -557,7 +557,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 		}
 	}
 
-	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
+	txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
 	if (!txd) {
 		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
 		return NULL;
@@ -627,7 +627,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
 	if (sglen == 0)
 		return NULL;
 
-	txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
+	txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
 	if (!txd) {
 		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
 		return NULL;
@@ -269,7 +269,7 @@ static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len,
 	struct usb_dmac_desc *desc;
 	unsigned long flags;
 
-	desc = kzalloc(sizeof(*desc) + sg_len * sizeof(desc->sg[0]), gfp);
+	desc = kzalloc(struct_size(desc, sg, sg_len), gfp);
 	if (!desc)
 		return -ENOMEM;
 
@@ -112,8 +112,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
 {
 	struct fw_node *node;
 
-	node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
-		       GFP_ATOMIC);
+	node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
 	if (node == NULL)
 		return NULL;
 
@@ -4022,8 +4022,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
 	if (count < 0)
 		return ERR_PTR(count);
 
-	descs = kzalloc(sizeof(*descs) + sizeof(descs->desc[0]) * count,
-			GFP_KERNEL);
+	descs = kzalloc(struct_size(descs, desc, count), GFP_KERNEL);
 	if (!descs)
 		return ERR_PTR(-ENOMEM);
 
@@ -779,8 +779,8 @@ nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
 
 	sdom = spec;
 	while (sdom->signal_nr) {
-		dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
-			      sizeof(*dom->signal), GFP_KERNEL);
+		dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
+			      GFP_KERNEL);
 		if (!dom)
 			return -ENOMEM;
 
@@ -132,7 +132,7 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
 
 	num_locks = i * 32; /* actual number of locks in this device */
 
-	bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
+	bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
 	if (!bank) {
 		ret = -ENOMEM;
 		goto iounmap_base;
@@ -119,7 +119,7 @@ static int u8500_hsem_probe(struct platform_device *pdev)
 	/* clear all interrupts */
 	writel(0xFFFF, io_base + HSEM_ICRALL);
 
-	bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
+	bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
 	if (!bank) {
 		ret = -ENOMEM;
 		goto iounmap_base;
@@ -1157,8 +1157,9 @@ static void ib_cache_update(struct ib_device *device,
 		goto err;
 	}
 
-	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
-			     sizeof *pkey_cache->table, GFP_KERNEL);
+	pkey_cache = kmalloc(struct_size(pkey_cache, table,
+					 tprops->pkey_tbl_len),
+			     GFP_KERNEL);
 	if (!pkey_cache)
 		goto err;
 
@@ -4298,8 +4298,8 @@ static void cm_add_one(struct ib_device *ib_device)
 	int count = 0;
 	u8 i;
 
-	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
-			 ib_device->phys_port_cnt, GFP_KERNEL);
+	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
+			 GFP_KERNEL);
 	if (!cm_dev)
 		return;
 
@@ -813,7 +813,7 @@ static void mcast_add_one(struct ib_device *device)
 	int i;
 	int count = 0;
 
-	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
+	dev = kmalloc(struct_size(dev, port, device->phys_port_cnt),
 		      GFP_KERNEL);
 	if (!dev)
 		return;
@@ -2756,8 +2756,8 @@ static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
 	struct ib_uflow_resources *resources;
 
 	resources =
-		kmalloc(sizeof(*resources) +
-			num_specs * sizeof(*resources->collection), GFP_KERNEL);
+		kmalloc(struct_size(resources, collection, num_specs),
+			GFP_KERNEL);
 
 	if (!resources)
 		return NULL;
@@ -297,8 +297,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
 	if (max_attr_buckets >= 0)
 		num_attr_buckets = max_attr_buckets + 1;
 
-	method = kzalloc(sizeof(*method) +
-			 num_attr_buckets * sizeof(*method->attr_buckets),
+	method = kzalloc(struct_size(method, attr_buckets, num_attr_buckets),
 			 GFP_KERNEL);
 	if (!method)
 		return ERR_PTR(-ENOMEM);
@@ -446,9 +445,9 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
 	if (max_method_buckets >= 0)
 		num_method_buckets = max_method_buckets + 1;
 
-	object = kzalloc(sizeof(*object) +
-			 num_method_buckets *
-			 sizeof(*object->method_buckets), GFP_KERNEL);
+	object = kzalloc(struct_size(object, method_buckets,
+				     num_method_buckets),
+			 GFP_KERNEL);
 	if (!object)
 		return ERR_PTR(-ENOMEM);
 
@@ -469,8 +468,8 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
 		if (methods_max_bucket < 0)
 			continue;
 
-		hash = kzalloc(sizeof(*hash) +
-			       sizeof(*hash->methods) * (methods_max_bucket + 1),
+		hash = kzalloc(struct_size(hash, methods,
+					   methods_max_bucket + 1),
 			       GFP_KERNEL);
 		if (!hash) {
 			res = -ENOMEM;
@@ -579,8 +578,8 @@ struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
 	if (max_object_buckets >= 0)
 		num_objects_buckets = max_object_buckets + 1;
 
-	root_spec = kzalloc(sizeof(*root_spec) +
-			    num_objects_buckets * sizeof(*root_spec->object_buckets),
+	root_spec = kzalloc(struct_size(root_spec, object_buckets,
+					num_objects_buckets),
 			    GFP_KERNEL);
 	if (!root_spec)
 		return ERR_PTR(-ENOMEM);
@@ -603,8 +602,8 @@ struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
 		if (objects_max_bucket < 0)
 			continue;
 
-		hash = kzalloc(sizeof(*hash) +
-			       sizeof(*hash->objects) * (objects_max_bucket + 1),
+		hash = kzalloc(struct_size(hash, objects,
+					   objects_max_bucket + 1),
 			       GFP_KERNEL);
 		if (!hash) {
 			res = -ENOMEM;
@@ -367,7 +367,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
 	obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
 	num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
 
-	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
+	table = kmalloc(struct_size(table, icm, num_icm), GFP_KERNEL);
 	if (!table)
 		return NULL;
 
@@ -529,7 +529,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
 		return NULL;
 
 	npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
-	db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
+	db_tab = kmalloc(struct_size(db_tab, page, npages), GFP_KERNEL);
 	if (!db_tab)
 		return ERR_PTR(-ENOMEM);
 
@@ -283,7 +283,7 @@ static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
 
 	/* Allocate struct plus pointers to first level page tables. */
 	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
-	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
+	mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL);
 	if (!mr)
 		goto bail;
 
@@ -730,7 +730,7 @@ struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
 
 	/* Allocate struct plus pointers to first level page tables. */
 	m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
-	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
+	fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL);
 	if (!fmr)
 		goto bail;
 
@@ -98,8 +98,7 @@ static int input_leds_connect(struct input_handler *handler,
 	if (!num_leds)
 		return -ENXIO;
 
-	leds = kzalloc(sizeof(*leds) + num_leds * sizeof(*leds->leds),
-		       GFP_KERNEL);
+	leds = kzalloc(struct_size(leds, leds, num_leds), GFP_KERNEL);
 	if (!leds)
 		return -ENOMEM;
 
@@ -49,7 +49,7 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
 	if (mt)
 		return mt->num_slots != num_slots ? -EINVAL : 0;
 
-	mt = kzalloc(sizeof(*mt) + num_slots * sizeof(*mt->slots), GFP_KERNEL);
+	mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
 	if (!mt)
 		goto err_mem;
 
@@ -756,7 +756,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
 		return ERR_PTR(-EINVAL);
 	}
 
-	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
+	rs = kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL);
 	if (!rs) {
 		ti->error = "Cannot allocate raid context";
 		return ERR_PTR(-ENOMEM);
@@ -182,8 +182,7 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
 			val = energy_quirk;
 	}
 
-	func = kzalloc(sizeof(*func) + sizeof(*func->template) * num,
-		       GFP_KERNEL);
+	func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
 	if (!func)
 		return ERR_PTR(-ENOMEM);
 
@@ -494,7 +494,7 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
 	int err;
 	int i;
 
-	d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL);
+	d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
 	if (!d)
 		return -ENOMEM;
 
@@ -1191,8 +1191,7 @@ static struct mlx5_flow_handle *alloc_handle(int num_rules)
 {
 	struct mlx5_flow_handle *handle;
 
-	handle = kzalloc(sizeof(*handle) + sizeof(handle->rule[0]) *
-			 num_rules, GFP_KERNEL);
+	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
 	if (!handle)
 		return NULL;
 
@@ -2987,9 +2987,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 
 			mvmsta = iwl_mvm_sta_from_mac80211(sta);
 			WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
-			ptk_pn = kzalloc(sizeof(*ptk_pn) +
-					 mvm->trans->num_rx_queues *
-					 sizeof(ptk_pn->q[0]),
+			ptk_pn = kzalloc(struct_size(ptk_pn, q,
+						     mvm->trans->num_rx_queues),
					 GFP_KERNEL);
 			if (!ptk_pn) {
 				ret = -ENOMEM;
@@ -236,8 +236,7 @@ int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
 
 	mt76_rx_aggr_stop(dev, wcid, tidno);
 
-	tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]),
-		      GFP_KERNEL);
+	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
 	if (!tid)
 		return -ENOMEM;
 
@@ -730,8 +730,7 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
 	if (num < 0)
 		return optional ? NULL : ERR_PTR(num);
 
-	resets = kzalloc(sizeof(*resets) + sizeof(resets->rstc[0]) * num,
-			 GFP_KERNEL);
+	resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL);
 	if (!resets)
 		return ERR_PTR(-ENOMEM);
 
@@ -326,8 +326,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
 	if (num_devices < 1)
 		return -EINVAL;
 
-	gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
-		       GFP_KERNEL);
+	gdev = kzalloc(struct_size(gdev, cdev, num_devices), GFP_KERNEL);
 	if (!gdev)
 		return -ENOMEM;
 
@@ -94,8 +94,8 @@ struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
 	struct gb_module *module;
 	int i;
 
-	module = kzalloc(sizeof(*module) + num_interfaces * sizeof(intf),
-			 GFP_KERNEL);
+	module = kzalloc(struct_size(module, interfaces, num_interfaces),
+			 GFP_KERNEL);
 	if (!module)
 		return NULL;
 
@@ -1287,9 +1287,8 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
 	}
 
 	/* allocate and initialize one new instance */
-	midi = kzalloc(
-		sizeof(*midi) + opts->in_ports * sizeof(*midi->in_ports_array),
-		GFP_KERNEL);
+	midi = kzalloc(struct_size(midi, in_ports_array, opts->in_ports),
+		       GFP_KERNEL);
 	if (!midi) {
 		status = -ENOMEM;
 		goto setup_fail;
@@ -136,8 +136,7 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
 	int error;
 
 	/* Initialize the Zorro bus */
-	bus = kzalloc(sizeof(*bus) +
-		      zorro_num_autocon * sizeof(bus->devices[0]),
+	bus = kzalloc(struct_size(bus, devices, zorro_num_autocon),
 		      GFP_KERNEL);
 	if (!bus)
 		return -ENOMEM;
@@ -43,8 +43,7 @@ struct afs_addr_list *afs_alloc_addrlist(unsigned int nr,
 
 	_enter("%u,%u,%u", nr, service, port);
 
-	alist = kzalloc(sizeof(*alist) + sizeof(alist->addrs[0]) * nr,
-			GFP_KERNEL);
+	alist = kzalloc(struct_size(alist, addrs, nr), GFP_KERNEL);
 	if (!alist)
 		return NULL;
 
@@ -4775,8 +4775,8 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 	int ret;
 
 	/* allocate the cgroup and its ID, 0 is reserved for the root */
-	cgrp = kzalloc(sizeof(*cgrp) +
-		       sizeof(cgrp->ancestor_ids[0]) * (level + 1), GFP_KERNEL);
+	cgrp = kzalloc(struct_size(cgrp, ancestor_ids, (level + 1)),
+		       GFP_KERNEL);
 	if (!cgrp)
 		return ERR_PTR(-ENOMEM);
 
@@ -1604,8 +1604,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
 	if (notes == 0)
 		return;
 
-	notes_attrs = kzalloc(sizeof(*notes_attrs)
-			      + notes * sizeof(notes_attrs->attrs[0]),
+	notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
 			      GFP_KERNEL);
 	if (notes_attrs == NULL)
 		return;
@@ -3700,8 +3700,7 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
 
 	lockdep_assert_held(&wq_pool_mutex);
 
-	ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),
-		      GFP_KERNEL);
+	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
 
 	new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
 	tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
@@ -62,7 +62,7 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
 
 	if (num_mon > CEPH_MAX_MON)
 		goto bad;
-	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
+	m = kmalloc(struct_size(m, mon_inst, num_mon), GFP_NOFS);
 	if (m == NULL)
 		return ERR_PTR(-ENOMEM);
 	m->fsid = fsid;
@@ -1000,8 +1000,7 @@ static int build_initial_monmap(struct ceph_mon_client *monc)
 	int i;
 
 	/* build initial monmap */
-	monc->monmap = kzalloc(sizeof(*monc->monmap) +
-			       num_mon*sizeof(monc->monmap->mon_inst[0]),
+	monc->monmap = kzalloc(struct_size(monc->monmap, mon_inst, num_mon),
 			       GFP_KERNEL);
 	if (!monc->monmap)
 		return -ENOMEM;
@@ -565,8 +565,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
 	} else {
 		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
-		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
-			      gfp_flags);
+		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
 	}
 	if (unlikely(!req))
 		return NULL;
@@ -184,8 +184,7 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
 	}
 
 	nstamps_max += 1;
-	e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * nstamps_max,
-		    GFP_ATOMIC);
+	e = kmalloc(struct_size(e, stamps, nstamps_max), GFP_ATOMIC);
 	if (e == NULL)
 		return NULL;
 	memcpy(&e->addr, addr, sizeof(e->addr));
@@ -73,8 +73,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 	 * variables. There are arrays that we encode directly
 	 * into parameters to make the rest of the operations easier.
 	 */
-	auth_hmacs = kzalloc(sizeof(*auth_hmacs) +
-			     sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp);
+	auth_hmacs = kzalloc(struct_size(auth_hmacs, hmac_ids,
+					 SCTP_AUTH_NUM_HMACS), gfp);
 	if (!auth_hmacs)
 		goto nomem;
 
@@ -259,8 +259,8 @@ int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
 	struct link_master *master_link = snd_kcontrol_chip(master);
 	struct link_slave *srec;
 
-	srec = kzalloc(sizeof(*srec) +
-		       slave->count * sizeof(*slave->vd), GFP_KERNEL);
+	srec = kzalloc(struct_size(srec, slave.vd, slave->count),
+		       GFP_KERNEL);
 	if (!srec)
 		return -ENOMEM;
 	srec->kctl = slave;
@@ -1088,7 +1088,7 @@ static int dapm_widget_list_create(struct snd_soc_dapm_widget_list **list,
 	list_for_each(it, widgets)
 		size++;
 
-	*list = kzalloc(sizeof(**list) + size * sizeof(*w), GFP_KERNEL);
+	*list = kzalloc(struct_size(*list, widgets, size), GFP_KERNEL);
 	if (*list == NULL)
 		return -ENOMEM;
 