tree wide: use kvfree() rather than conditional kfree()/vfree()
There are many locations that do

	if (memory_was_allocated_by_vmalloc)
		vfree(ptr);
	else
		kfree(ptr);

but kvfree() can handle both kmalloc()ed memory and vmalloc()ed memory
using is_vmalloc_addr(). Unless callers have special reasons, we can
replace this branch with kvfree(). Please check and reply if you find
problems.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Jan Kara <jack@suse.com>
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Acked-by: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Acked-by: David Rientjes <rientjes@google.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: Boris Petkov <bp@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: eab95db69d
Commit: 1d5cfdb076
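The conversion is mechanical because kvfree() already performs the dispatch
that each caller used to hand-roll. As a reference, the following sketch is
essentially what mm/util.c's kvfree() does (reproduced from my understanding
of that file as an illustration; it is not part of this commit's diff):

	/* Free either kind of allocation by checking which address
	 * range the pointer falls in. */
	void kvfree(const void *addr)
	{
		if (is_vmalloc_addr(addr))	/* pointer in the vmalloc area? */
			vfree(addr);
		else
			kfree(addr);
	}

The per-file hunks below therefore just delete the caller-side branch, along
with any bookkeeping used to remember how the buffer was allocated, and call
kvfree() instead.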
arch/arm/mm/dma-mapping.c
@@ -1200,10 +1200,7 @@ error:
 	while (i--)
 		if (pages[i])
 			__free_pages(pages[i], 0);
-	if (array_size <= PAGE_SIZE)
-		kfree(pages);
-	else
-		vfree(pages);
+	kvfree(pages);
 	return NULL;
 }
 
@@ -1211,7 +1208,6 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
 			       size_t size, struct dma_attrs *attrs)
 {
 	int count = size >> PAGE_SHIFT;
-	int array_size = count * sizeof(struct page *);
 	int i;
 
 	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
@@ -1222,10 +1218,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
 			__free_pages(pages[i], 0);
 	}
 
-	if (array_size <= PAGE_SIZE)
-		kfree(pages);
-	else
-		vfree(pages);
+	kvfree(pages);
 	return 0;
 }
 
drivers/acpi/apei/erst.c
@@ -32,6 +32,7 @@
 #include <linux/hardirq.h>
 #include <linux/pstore.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h> /* kvfree() */
 #include <acpi/apei.h>
 
 #include "apei-internal.h"
@@ -532,10 +533,7 @@ retry:
 			return -ENOMEM;
 		memcpy(new_entries, entries,
 		       erst_record_id_cache.len * sizeof(entries[0]));
-		if (erst_record_id_cache.size < PAGE_SIZE)
-			kfree(entries);
-		else
-			vfree(entries);
+		kvfree(entries);
 		erst_record_id_cache.entries = entries = new_entries;
 		erst_record_id_cache.size = new_size;
 	}
drivers/block/drbd/drbd_bitmap.c
@@ -364,12 +364,9 @@ static void bm_free_pages(struct page **pages, unsigned long number)
 	}
 }
 
-static void bm_vk_free(void *ptr, int v)
+static inline void bm_vk_free(void *ptr)
 {
-	if (v)
-		vfree(ptr);
-	else
-		kfree(ptr);
+	kvfree(ptr);
 }
 
 /*
@@ -379,7 +376,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 {
 	struct page **old_pages = b->bm_pages;
 	struct page **new_pages, *page;
-	unsigned int i, bytes, vmalloced = 0;
+	unsigned int i, bytes;
 	unsigned long have = b->bm_number_of_pages;
 
 	BUG_ON(have == 0 && old_pages != NULL);
@@ -401,7 +398,6 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 				PAGE_KERNEL);
 		if (!new_pages)
 			return NULL;
-		vmalloced = 1;
 	}
 
 	if (want >= have) {
@@ -411,7 +407,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
 			if (!page) {
 				bm_free_pages(new_pages + have, i - have);
-				bm_vk_free(new_pages, vmalloced);
+				bm_vk_free(new_pages);
 				return NULL;
 			}
 			/* we want to know which page it is
@@ -427,11 +423,6 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 		 */
 	}
 
-	if (vmalloced)
-		b->bm_flags |= BM_P_VMALLOCED;
-	else
-		b->bm_flags &= ~BM_P_VMALLOCED;
-
 	return new_pages;
 }
 
@@ -469,7 +460,7 @@ void drbd_bm_cleanup(struct drbd_device *device)
 	if (!expect(device->bitmap))
 		return;
 	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
-	bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
+	bm_vk_free(device->bitmap->bm_pages);
 	kfree(device->bitmap);
 	device->bitmap = NULL;
 }
@@ -643,7 +634,6 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
 	unsigned long want, have, onpages; /* number of pages */
 	struct page **npages, **opages = NULL;
 	int err = 0, growing;
-	int opages_vmalloced;
 
 	if (!expect(b))
 		return -ENOMEM;
@@ -656,8 +646,6 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
 	if (capacity == b->bm_dev_capacity)
 		goto out;
 
-	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);
-
 	if (capacity == 0) {
 		spin_lock_irq(&b->bm_lock);
 		opages = b->bm_pages;
@@ -671,7 +659,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
 		b->bm_dev_capacity = 0;
 		spin_unlock_irq(&b->bm_lock);
 		bm_free_pages(opages, onpages);
-		bm_vk_free(opages, opages_vmalloced);
+		bm_vk_free(opages);
 		goto out;
 	}
 	bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));
@@ -744,7 +732,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
 
 	spin_unlock_irq(&b->bm_lock);
 	if (opages != npages)
-		bm_vk_free(opages, opages_vmalloced);
+		bm_vk_free(opages);
 	if (!growing)
 		b->bm_set = bm_count_bits(b);
 	drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
drivers/block/drbd/drbd_int.h
@@ -536,9 +536,6 @@ struct drbd_bitmap; /* opaque for drbd_device */
 /* definition of bits in bm_flags to be used in drbd_bm_lock
  * and drbd_bitmap_io and friends. */
 enum bm_flag {
-	/* do we need to kfree, or vfree bm_pages? */
-	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
-
 	/* currently locked for bulk operation */
 	BM_LOCKED_MASK = 0xf,
 
drivers/char/mspec.c
@@ -93,14 +93,11 @@ struct vma_data {
 	spinlock_t lock;	/* Serialize access to this structure. */
 	int count;		/* Number of pages allocated. */
 	enum mspec_page_type type; /* Type of pages allocated. */
-	int flags;		/* See VMD_xxx below. */
 	unsigned long vm_start;	/* Original (unsplit) base. */
 	unsigned long vm_end;	/* Original (unsplit) end. */
 	unsigned long maddr[0];	/* Array of MSPEC addresses. */
 };
 
-#define VMD_VMALLOCED 0x1	/* vmalloc'd rather than kmalloc'd */
-
 /* used on shub2 to clear FOP cache in the HUB */
 static unsigned long scratch_page[MAX_NUMNODES];
 #define SH2_AMO_CACHE_ENTRIES	4
@@ -185,10 +182,7 @@ mspec_close(struct vm_area_struct *vma)
 			  "failed to zero page %ld\n", my_page);
 	}
 
-	if (vdata->flags & VMD_VMALLOCED)
-		vfree(vdata);
-	else
-		kfree(vdata);
+	kvfree(vdata);
 }
 
 /*
@@ -256,7 +250,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
 					enum mspec_page_type type)
 {
 	struct vma_data *vdata;
-	int pages, vdata_size, flags = 0;
+	int pages, vdata_size;
 
 	if (vma->vm_pgoff != 0)
 		return -EINVAL;
@@ -271,16 +265,13 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
 	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
 	if (vdata_size <= PAGE_SIZE)
 		vdata = kzalloc(vdata_size, GFP_KERNEL);
-	else {
+	else
 		vdata = vzalloc(vdata_size);
-		flags = VMD_VMALLOCED;
-	}
 	if (!vdata)
 		return -ENOMEM;
 
 	vdata->vm_start = vma->vm_start;
 	vdata->vm_end = vma->vm_end;
-	vdata->flags = flags;
 	vdata->type = type;
 	spin_lock_init(&vdata->lock);
 	atomic_set(&vdata->refcnt, 1);
drivers/gpu/drm/drm_hashtab.c
@@ -198,10 +198,7 @@ EXPORT_SYMBOL(drm_ht_remove_item);
 void drm_ht_remove(struct drm_open_hash *ht)
 {
 	if (ht->table) {
-		if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order)
-			kfree(ht->table);
-		else
-			vfree(ht->table);
+		kvfree(ht->table);
 		ht->table = NULL;
 	}
 }
drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -151,16 +151,12 @@ do { \
 
 #define LIBCFS_FREE(ptr, size)					\
 do {								\
-	int s = (size);						\
 	if (unlikely((ptr) == NULL)) {				\
 		CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
-		       "%s:%d\n", s, __FILE__, __LINE__);	\
+		       "%s:%d\n", (int)(size), __FILE__, __LINE__); \
 		break;						\
 	}							\
-	if (unlikely(s > LIBCFS_VMALLOC_SIZE))			\
-		vfree(ptr);					\
-	else							\
-		kfree(ptr);					\
+	kvfree(ptr);						\
 } while (0)
 
 /******************************************************************************/
fs/coda/coda_linux.h
@@ -72,8 +72,7 @@ void coda_sysctl_clean(void);
 } while (0)
 
 
-#define CODA_FREE(ptr,size) \
-    do { if (size < PAGE_SIZE) kfree((ptr)); else vfree((ptr)); } while (0)
+#define CODA_FREE(ptr, size) kvfree((ptr))
 
 /* inode to cnode access functions */
fs/jffs2/build.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mm.h> /* kvfree() */
 #include "nodelist.h"
 
 static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
@@ -383,12 +384,7 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
 	return 0;
 
  out_free:
-#ifndef __ECOS
-	if (jffs2_blocks_use_vmalloc(c))
-		vfree(c->blocks);
-	else
-#endif
-		kfree(c->blocks);
+	kvfree(c->blocks);
 
 	return ret;
 }
fs/jffs2/fs.c
@@ -596,10 +596,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
 out_root:
 	jffs2_free_ino_caches(c);
 	jffs2_free_raw_node_refs(c);
-	if (jffs2_blocks_use_vmalloc(c))
-		vfree(c->blocks);
-	else
-		kfree(c->blocks);
+	kvfree(c->blocks);
 out_inohash:
 	jffs2_clear_xattr_subsystem(c);
 	kfree(c->inocache_list);
fs/jffs2/super.c
@@ -331,10 +331,7 @@ static void jffs2_put_super (struct super_block *sb)
 
 	jffs2_free_ino_caches(c);
 	jffs2_free_raw_node_refs(c);
-	if (jffs2_blocks_use_vmalloc(c))
-		vfree(c->blocks);
-	else
-		kfree(c->blocks);
+	kvfree(c->blocks);
 	jffs2_flash_cleanup(c);
 	kfree(c->inocache_list);
 	jffs2_clear_xattr_subsystem(c);
fs/udf/super.c
@@ -279,17 +279,12 @@ static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
 {
 	int i;
 	int nr_groups = bitmap->s_nr_groups;
-	int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) *
-			nr_groups);
 
 	for (i = 0; i < nr_groups; i++)
 		if (bitmap->s_block_bitmap[i])
 			brelse(bitmap->s_block_bitmap[i]);
 
-	if (size <= PAGE_SIZE)
-		kfree(bitmap);
-	else
-		vfree(bitmap);
+	kvfree(bitmap);
 }
 
 static void udf_free_partition(struct udf_part_map *map)
ipc/sem.c
@@ -1493,7 +1493,7 @@ out_rcu_wakeup:
 	wake_up_sem_queue_do(&tasks);
 out_free:
 	if (sem_io != fast_sem_io)
-		ipc_free(sem_io, sizeof(ushort)*nsems);
+		ipc_free(sem_io);
 	return err;
 }
 
ipc/util.c
@@ -414,17 +414,12 @@ void *ipc_alloc(int size)
 /**
  * ipc_free - free ipc space
  * @ptr: pointer returned by ipc_alloc
- * @size: size of block
 *
- * Free a block created with ipc_alloc(). The caller must know the size
- * used in the allocation call.
+ * Free a block created with ipc_alloc().
 */
-void ipc_free(void *ptr, int size)
+void ipc_free(void *ptr)
 {
-	if (size > PAGE_SIZE)
-		vfree(ptr);
-	else
-		kfree(ptr);
+	kvfree(ptr);
 }
 
 /**
ipc/util.h
@@ -118,7 +118,7 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
 * both function can sleep
 */
 void *ipc_alloc(int size);
-void ipc_free(void *ptr, int size);
+void ipc_free(void *ptr);
 
 /*
 * For allocation that need to be freed by RCU.
mm/percpu.c
@@ -305,16 +305,12 @@ static void *pcpu_mem_zalloc(size_t size)
 /**
  * pcpu_mem_free - free memory
  * @ptr: memory to free
- * @size: size of the area
  *
  * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
  */
-static void pcpu_mem_free(void *ptr, size_t size)
+static void pcpu_mem_free(void *ptr)
 {
-	if (size <= PAGE_SIZE)
-		kfree(ptr);
-	else
-		vfree(ptr);
+	kvfree(ptr);
 }
 
 /**
@@ -463,8 +459,8 @@ out_unlock:
 	 * pcpu_mem_free() might end up calling vfree() which uses
 	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
 	 */
-	pcpu_mem_free(old, old_size);
-	pcpu_mem_free(new, new_size);
+	pcpu_mem_free(old);
+	pcpu_mem_free(new);
 
 	return 0;
 }
@@ -732,7 +728,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
 						sizeof(chunk->map[0]));
 	if (!chunk->map) {
-		pcpu_mem_free(chunk, pcpu_chunk_struct_size);
+		pcpu_mem_free(chunk);
 		return NULL;
 	}
 
@@ -753,8 +749,8 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 {
 	if (!chunk)
 		return;
-	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
-	pcpu_mem_free(chunk, pcpu_chunk_struct_size);
+	pcpu_mem_free(chunk->map);
+	pcpu_mem_free(chunk);
 }
 
 /**
net/ipv4/fib_trie.c
@@ -289,10 +289,8 @@ static void __node_free_rcu(struct rcu_head *head)
 
 	if (!n->tn_bits)
 		kmem_cache_free(trie_leaf_kmem, n);
-	else if (n->tn_bits <= TNODE_KMALLOC_MAX)
-		kfree(n);
 	else
-		vfree(n);
+		kvfree(n);
 }
 
 #define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu)
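Note that only the free side collapses: allocation sites keep their
size-based choice between the two allocators (compare ipc_alloc() in
ipc/util.c, which is untouched while ipc_free() shrinks to a kvfree() call).
A hedged sketch of the idiom that remains after this commit, with
illustrative names not taken from the diff:

	/* Pick the allocator by size; kvfree() can free either result. */
	static void *example_alloc(size_t size)
	{
		if (size <= PAGE_SIZE)
			return kmalloc(size, GFP_KERNEL);
		return vmalloc(size);
	}

	static void example_free(void *ptr)
	{
		kvfree(ptr);	/* no need to record which allocator was used */
	}

One constraint carries over unchanged: as the comment retained in the
mm/percpu.c hunk notes, this path may end up in vfree(), which uses an
IRQ-unsafe lock, so it cannot be called under a spinlock such as pcpu_lock.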