staging: spectra: optimize kmalloc to kzalloc
Use kzalloc rather than kmalloc followed by a memset to 0. Found by Coccinelle. Signed-off-by: Alexander Beregalov <a.beregalov@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:
Parent
2f48131636
Commit
fb44022f18
|
@ -428,10 +428,9 @@ static int allocate_memory(void)
|
||||||
DeviceInfo.wPageDataSize;
|
DeviceInfo.wPageDataSize;
|
||||||
|
|
||||||
/* Malloc memory for block tables */
|
/* Malloc memory for block tables */
|
||||||
g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
|
g_pBlockTable = kzalloc(block_table_size, GFP_ATOMIC);
|
||||||
if (!g_pBlockTable)
|
if (!g_pBlockTable)
|
||||||
goto block_table_fail;
|
goto block_table_fail;
|
||||||
memset(g_pBlockTable, 0, block_table_size);
|
|
||||||
total_bytes += block_table_size;
|
total_bytes += block_table_size;
|
||||||
|
|
||||||
g_pWearCounter = (u8 *)(g_pBlockTable +
|
g_pWearCounter = (u8 *)(g_pBlockTable +
|
||||||
|
@ -447,19 +446,17 @@ static int allocate_memory(void)
|
||||||
Cache.array[i].address = NAND_CACHE_INIT_ADDR;
|
Cache.array[i].address = NAND_CACHE_INIT_ADDR;
|
||||||
Cache.array[i].use_cnt = 0;
|
Cache.array[i].use_cnt = 0;
|
||||||
Cache.array[i].changed = CLEAR;
|
Cache.array[i].changed = CLEAR;
|
||||||
Cache.array[i].buf = kmalloc(Cache.cache_item_size,
|
Cache.array[i].buf = kzalloc(Cache.cache_item_size,
|
||||||
GFP_ATOMIC);
|
GFP_ATOMIC);
|
||||||
if (!Cache.array[i].buf)
|
if (!Cache.array[i].buf)
|
||||||
goto cache_item_fail;
|
goto cache_item_fail;
|
||||||
memset(Cache.array[i].buf, 0, Cache.cache_item_size);
|
|
||||||
total_bytes += Cache.cache_item_size;
|
total_bytes += Cache.cache_item_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Malloc memory for IPF */
|
/* Malloc memory for IPF */
|
||||||
g_pIPF = kmalloc(page_size, GFP_ATOMIC);
|
g_pIPF = kzalloc(page_size, GFP_ATOMIC);
|
||||||
if (!g_pIPF)
|
if (!g_pIPF)
|
||||||
goto ipf_fail;
|
goto ipf_fail;
|
||||||
memset(g_pIPF, 0, page_size);
|
|
||||||
total_bytes += page_size;
|
total_bytes += page_size;
|
||||||
|
|
||||||
/* Malloc memory for data merging during Level2 Cache flush */
|
/* Malloc memory for data merging during Level2 Cache flush */
|
||||||
|
@ -476,10 +473,9 @@ static int allocate_memory(void)
|
||||||
total_bytes += block_size;
|
total_bytes += block_size;
|
||||||
|
|
||||||
/* Malloc memory for temp buffer */
|
/* Malloc memory for temp buffer */
|
||||||
g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
|
g_pTempBuf = kzalloc(Cache.cache_item_size, GFP_ATOMIC);
|
||||||
if (!g_pTempBuf)
|
if (!g_pTempBuf)
|
||||||
goto Temp_buf_fail;
|
goto Temp_buf_fail;
|
||||||
memset(g_pTempBuf, 0, Cache.cache_item_size);
|
|
||||||
total_bytes += Cache.cache_item_size;
|
total_bytes += Cache.cache_item_size;
|
||||||
|
|
||||||
/* Malloc memory for block table blocks */
|
/* Malloc memory for block table blocks */
|
||||||
|
@ -589,10 +585,9 @@ static int allocate_memory(void)
|
||||||
total_bytes += block_size;
|
total_bytes += block_size;
|
||||||
|
|
||||||
/* Malloc memory for copy of block table used in CDMA mode */
|
/* Malloc memory for copy of block table used in CDMA mode */
|
||||||
g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
|
g_pBTStartingCopy = kzalloc(block_table_size, GFP_ATOMIC);
|
||||||
if (!g_pBTStartingCopy)
|
if (!g_pBTStartingCopy)
|
||||||
goto bt_starting_copy;
|
goto bt_starting_copy;
|
||||||
memset(g_pBTStartingCopy, 0, block_table_size);
|
|
||||||
total_bytes += block_table_size;
|
total_bytes += block_table_size;
|
||||||
|
|
||||||
g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
|
g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
|
||||||
|
@ -608,28 +603,25 @@ static int allocate_memory(void)
|
||||||
5 * DeviceInfo.wDataBlockNum * sizeof(u8);
|
5 * DeviceInfo.wDataBlockNum * sizeof(u8);
|
||||||
if (DeviceInfo.MLCDevice)
|
if (DeviceInfo.MLCDevice)
|
||||||
mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
|
mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
|
||||||
g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
|
g_pBlockTableCopies = kzalloc(mem_size, GFP_ATOMIC);
|
||||||
if (!g_pBlockTableCopies)
|
if (!g_pBlockTableCopies)
|
||||||
goto blk_table_copies_fail;
|
goto blk_table_copies_fail;
|
||||||
memset(g_pBlockTableCopies, 0, mem_size);
|
|
||||||
total_bytes += mem_size;
|
total_bytes += mem_size;
|
||||||
g_pNextBlockTable = g_pBlockTableCopies;
|
g_pNextBlockTable = g_pBlockTableCopies;
|
||||||
|
|
||||||
/* Malloc memory for Block Table Delta */
|
/* Malloc memory for Block Table Delta */
|
||||||
mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
|
mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
|
||||||
g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
|
g_pBTDelta = kzalloc(mem_size, GFP_ATOMIC);
|
||||||
if (!g_pBTDelta)
|
if (!g_pBTDelta)
|
||||||
goto bt_delta_fail;
|
goto bt_delta_fail;
|
||||||
memset(g_pBTDelta, 0, mem_size);
|
|
||||||
total_bytes += mem_size;
|
total_bytes += mem_size;
|
||||||
g_pBTDelta_Free = g_pBTDelta;
|
g_pBTDelta_Free = g_pBTDelta;
|
||||||
|
|
||||||
/* Malloc memory for Copy Back Buffers */
|
/* Malloc memory for Copy Back Buffers */
|
||||||
for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
|
for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
|
||||||
cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
|
cp_back_buf_copies[j] = kzalloc(block_size, GFP_ATOMIC);
|
||||||
if (!cp_back_buf_copies[j])
|
if (!cp_back_buf_copies[j])
|
||||||
goto cp_back_buf_copies_fail;
|
goto cp_back_buf_copies_fail;
|
||||||
memset(cp_back_buf_copies[j], 0, block_size);
|
|
||||||
total_bytes += block_size;
|
total_bytes += block_size;
|
||||||
}
|
}
|
||||||
cp_back_buf_idx = 0;
|
cp_back_buf_idx = 0;
|
||||||
|
|
Loading…
Reference in new issue