[XFS] Clean up the use of zones/slabs: make it more consistent and allow
flags to be passed.

SGI-PV: 949073
SGI-Modid: xfs-linux-melb:xfs-kern:25122a
Signed-off-by: Nathan Scott <nathans@sgi.com>

commit: 8758280fcc
parent: 8d280b98cf
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -23,17 +23,8 @@
 #include <linux/mm.h>
 
 /*
- * memory management routines
+ * Process flags handling
  */
-#define KM_SLEEP	0x0001u
-#define KM_NOSLEEP	0x0002u
-#define KM_NOFS		0x0004u
-#define KM_MAYFAIL	0x0008u
-
-#define kmem_zone	kmem_cache
-#define kmem_zone_t	struct kmem_cache
-
 typedef unsigned long xfs_pflags_t;
 
 #define PFLAGS_TEST_NOIO()		(current->flags & PF_NOIO)
 #define PFLAGS_TEST_FSTRANS()		(current->flags & PF_FSTRANS)
@@ -67,74 +58,102 @@ typedef unsigned long xfs_pflags_t;
 	*(NSTATEP) = *(OSTATEP);	\
 } while (0)
 
-static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags)
-{
-	gfp_t	lflags = __GFP_NOWARN;	/* we'll report problems, if need be */
+/*
+ * General memory allocation interfaces
+ */
 
-#ifdef DEBUG
-	if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
-		printk(KERN_WARNING
-		    "XFS: memory allocation with wrong flags (%x)\n", flags);
-		BUG();
-	}
-#endif
+#define KM_SLEEP	0x0001u
+#define KM_NOSLEEP	0x0002u
+#define KM_NOFS		0x0004u
+#define KM_MAYFAIL	0x0008u
+
+/*
+ * We use a special process flag to avoid recursive callbacks into
+ * the filesystem during transactions.  We will also issue our own
+ * warnings, so we explicitly skip any generic ones (silly of us).
+ */
+static inline gfp_t
+kmem_flags_convert(unsigned int __nocast flags)
+{
+	gfp_t	lflags;
+
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
 
 	if (flags & KM_NOSLEEP) {
-		lflags |= GFP_ATOMIC;
+		lflags = GFP_ATOMIC | __GFP_NOWARN;
 	} else {
-		lflags |= GFP_KERNEL;
-
-		/* avoid recusive callbacks to filesystem during transactions */
+		lflags = GFP_KERNEL | __GFP_NOWARN;
 		if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS))
 			lflags &= ~__GFP_FS;
 	}
-
-	return lflags;
+	return lflags;
 }
 
-static __inline kmem_zone_t *
-kmem_zone_init(int size, char *zone_name)
-{
-	return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
-}
-
-static __inline void
-kmem_zone_free(kmem_zone_t *zone, void *ptr)
-{
-	kmem_cache_free(zone, ptr);
-}
-
-static __inline void
-kmem_zone_destroy(kmem_zone_t *zone)
-{
-	if (zone && kmem_cache_destroy(zone))
-		BUG();
-}
-
-extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
-extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
-
 extern void *kmem_alloc(size_t, unsigned int __nocast);
 extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
 extern void *kmem_zalloc(size_t, unsigned int __nocast);
 extern void  kmem_free(void *, size_t);
 
+/*
+ * Zone interfaces
+ */
+
+#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
+#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
+#define KM_ZONE_SPREAD	0
+
+#define kmem_zone	kmem_cache
+#define kmem_zone_t	struct kmem_cache
+
+static inline kmem_zone_t *
+kmem_zone_init(int size, char *zone_name)
+{
+	return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
+}
+
+static inline kmem_zone_t *
+kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
+		     void (*construct)(void *, kmem_zone_t *, unsigned long))
+{
+	return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
+}
+
+static inline void
+kmem_zone_free(kmem_zone_t *zone, void *ptr)
+{
+	kmem_cache_free(zone, ptr);
+}
+
+static inline void
+kmem_zone_destroy(kmem_zone_t *zone)
+{
+	if (zone && kmem_cache_destroy(zone))
+		BUG();
+}
+
+extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
+
+/*
+ * Low memory cache shrinkers
+ */
+
 typedef struct shrinker *kmem_shaker_t;
 typedef int (*kmem_shake_func_t)(int, gfp_t);
 
-static __inline kmem_shaker_t
+static inline kmem_shaker_t
 kmem_shake_register(kmem_shake_func_t sfunc)
 {
 	return set_shrinker(DEFAULT_SEEKS, sfunc);
 }
 
-static __inline void
+static inline void
 kmem_shake_deregister(kmem_shaker_t shrinker)
 {
 	remove_shrinker(shrinker);
 }
 
-static __inline int
+static inline int
 kmem_shake_allow(gfp_t gfp_mask)
 {
 	return (gfp_mask & __GFP_WAIT);
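
Note: this header is the core of the cleanup. The KM_* allocation flags are
translated to gfp_t masks in one place (kmem_flags_convert), and
kmem_zone_init_flags() lets callers pass slab flags and a constructor when
creating a zone. A minimal usage sketch, assuming the header above; the
struct, zone name, and functions below are made up for illustration and are
not part of the patch:

    struct foo_item {
            int     state;
    };

    static kmem_zone_t *foo_zone;

    static int foo_setup(void)
    {
            /* hardware-aligned, reclaim-accounted zone, no constructor */
            foo_zone = kmem_zone_init_flags(sizeof(struct foo_item),
                                            "foo_item",
                                            KM_ZONE_HWALIGN | KM_ZONE_RECLAIM,
                                            NULL);
            if (!foo_zone)
                    return -ENOMEM;
            return 0;
    }

    static void foo_use(void)
    {
            /* KM_SLEEP may block; with PF_FSTRANS set on the task (or
             * KM_NOFS passed), kmem_flags_convert() masks out __GFP_FS
             * so the allocation cannot recurse into the filesystem */
            struct foo_item *fp = kmem_zone_zalloc(foo_zone, KM_SLEEP);

            kmem_zone_free(foo_zone, fp);
    }

    static void foo_teardown(void)
    {
            kmem_zone_destroy(foo_zone);    /* BUGs if objects are live */
    }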
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1805,13 +1805,12 @@ xfs_flush_buftarg(
 int __init
 xfs_buf_init(void)
 {
-	int		error = -ENOMEM;
-
 #ifdef XFS_BUF_TRACE
 	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
 #endif
 
-	xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
+	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
+						KM_ZONE_HWALIGN, NULL);
 	if (!xfs_buf_zone)
 		goto out_free_trace_buf;
 
@@ -1839,7 +1838,7 @@ xfs_buf_init(void)
 #ifdef XFS_BUF_TRACE
 	ktrace_free(xfs_buf_trace_buf);
 #endif
-	return error;
+	return -ENOMEM;
 }
 
 void
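
The two xfs_buf.c hunks also drop the local error variable: -ENOMEM is the
only failure xfs_buf_init() can report, so the unwind path returns it
directly. A condensed sketch of that goto-unwind pattern, with hypothetical
zone names (not from the patch):

    static kmem_zone_t *a_zone, *b_zone;

    int __init example_init(void)
    {
            a_zone = kmem_zone_init_flags(sizeof(int), "a_zone",
                                          KM_ZONE_HWALIGN, NULL);
            if (!a_zone)
                    goto out;
            b_zone = kmem_zone_init(sizeof(int), "b_zone");
            if (!b_zone)
                    goto out_destroy_a_zone;
            return 0;

     out_destroy_a_zone:
            kmem_zone_destroy(a_zone);
     out:
            return -ENOMEM;     /* the only possible error, no local needed */
    }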
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -337,8 +337,8 @@ linvfs_alloc_inode(
 {
 	vnode_t			*vp;
 
-	vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP));
-	if (!vp)
+	vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
+	if (unlikely(!vp))
 		return NULL;
 	return LINVFS_GET_IP(vp);
 }
@@ -352,23 +352,21 @@ linvfs_destroy_inode(
 
 STATIC void
 linvfs_inode_init_once(
-	void			*data,
-	kmem_cache_t		*cachep,
+	void			*vnode,
+	kmem_zone_t		*zonep,
 	unsigned long		flags)
 {
-	vnode_t			*vp = (vnode_t *)data;
-
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(LINVFS_GET_IP(vp));
+		      SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(LINVFS_GET_IP((vnode_t *)vnode));
 }
 
 STATIC int
-linvfs_init_zones(void)
+xfs_init_zones(void)
 {
-	xfs_vnode_zone = kmem_cache_create("xfs_vnode",
-				sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
-				linvfs_inode_init_once, NULL);
+	xfs_vnode_zone = kmem_zone_init_flags(sizeof(vnode_t), "xfs_vnode_t",
+					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM,
+					linvfs_inode_init_once);
 	if (!xfs_vnode_zone)
 		goto out;
 
@@ -377,14 +375,12 @@ linvfs_init_zones(void)
 		goto out_destroy_vnode_zone;
 
 	xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
-			mempool_alloc_slab, mempool_free_slab,
-			xfs_ioend_zone);
+					mempool_alloc_slab, mempool_free_slab,
+					xfs_ioend_zone);
 	if (!xfs_ioend_pool)
 		goto out_free_ioend_zone;
-
 	return 0;
 
-
 out_free_ioend_zone:
 	kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
@@ -394,7 +390,7 @@ linvfs_init_zones(void)
 }
 
 STATIC void
-linvfs_destroy_zones(void)
+xfs_destroy_zones(void)
 {
 	mempool_destroy(xfs_ioend_pool);
 	kmem_zone_destroy(xfs_vnode_zone);
@@ -405,7 +401,7 @@ linvfs_destroy_zones(void)
  * Attempt to flush the inode, this will actually fail
  * if the inode is pinned, but we dirty the inode again
  * at the point when it is unpinned after a log write,
- * since this is when the inode itself becomes flushable. 
+ * since this is when the inode itself becomes flushable.
  */
 STATIC int
 linvfs_write_inode(
@@ -963,7 +959,7 @@ init_xfs_fs( void )
 
 	ktrace_init(64);
 
-	error = linvfs_init_zones();
+	error = xfs_init_zones();
 	if (error < 0)
 		goto undo_zones;
 
@@ -986,7 +982,7 @@ undo_register:
 	xfs_buf_terminate();
 
 undo_buffers:
-	linvfs_destroy_zones();
+	xfs_destroy_zones();
 
 undo_zones:
 	return error;
@@ -1000,7 +996,7 @@ exit_xfs_fs( void )
 	unregister_filesystem(&xfs_fs_type);
 	xfs_cleanup();
 	xfs_buf_terminate();
-	linvfs_destroy_zones();
+	xfs_destroy_zones();
 	ktrace_uninit();
 }
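
Worth noting in the xfs_super.c hunks: kmem_zone_init_flags() forwards the
constructor straight to kmem_cache_create(), so linvfs_inode_init_once()
keeps the 2.6-era three-argument ctor signature and must still check the
ctor flags itself. A sketch of that protocol with illustrative names (the
struct and functions below are not from the patch):

    struct example { long x; };

    static kmem_zone_t *example_zone;

    static void example_init_once(void *obj, kmem_zone_t *zonep,
                                  unsigned long flags)
    {
            /* run only on the real construction pass, not on the
             * SLAB_CTOR_VERIFY debugging pass */
            if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                            SLAB_CTOR_CONSTRUCTOR)
                    memset(obj, 0, sizeof(struct example));
    }

    static int example_setup(void)
    {
            example_zone = kmem_zone_init_flags(sizeof(struct example),
                                                "example", KM_ZONE_HWALIGN,
                                                example_init_once);
            return example_zone ? 0 : -ENOMEM;
    }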
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -380,7 +380,7 @@ typedef struct xfs_trans {
 	xfs_trans_header_t	t_header;	/* header for in-log trans */
 	unsigned int		t_busy_free;	/* busy descs free */
 	xfs_log_busy_chunk_t	t_busy;		/* busy/async free blocks */
-	xfs_pflags_t		t_pflags;	/* saved pflags state */
+	unsigned long		t_pflags;	/* saved process flags state */
 } xfs_trans_t;
 
 #endif	/* __KERNEL__ */
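
The t_pflags field exists so a transaction can mark its task with
PF_FSTRANS for its lifetime and restore the prior state afterwards; with
the xfs_pflags_t typedef gone it becomes a plain unsigned long. An
illustration of the intended save/restore dance, assuming the PF_FSTRANS
semantics shown in kmem.h above; these helper names are hypothetical, not
from this patch:

    static inline void example_trans_start(xfs_trans_t *tp)
    {
            tp->t_pflags = current->flags;  /* save */
            current->flags |= PF_FSTRANS;   /* kmem_flags_convert() will
                                             * now mask out __GFP_FS */
    }

    static inline void example_trans_end(xfs_trans_t *tp)
    {
            current->flags = tp->t_pflags;  /* restore */
    }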
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -77,11 +77,12 @@ xfs_init(void)
 						 "xfs_bmap_free_item");
 	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
 					    "xfs_btree_cur");
-	xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode");
 	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
 	xfs_da_state_zone =
 		kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state");
 	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
+	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
+	xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
 
 	/*
 	 * The size of the zone allocated buf log item is the maximum
@@ -93,17 +94,30 @@ xfs_init(void)
 			(((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
 			  NBWORD) * sizeof(int))),
 			   "xfs_buf_item");
-	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
-			((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
+	xfs_efd_zone =
+		kmem_zone_init((sizeof(xfs_efd_log_item_t) +
+				((XFS_EFD_MAX_FAST_EXTENTS - 1) *
+				 sizeof(xfs_extent_t))),
 			      "xfs_efd_item");
-	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
-			((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
+	xfs_efi_zone =
+		kmem_zone_init((sizeof(xfs_efi_log_item_t) +
+				((XFS_EFI_MAX_FAST_EXTENTS - 1) *
+				 sizeof(xfs_extent_t))),
 			      "xfs_efi_item");
-	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
-	xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili");
-	xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t),
-					    "xfs_chashlist");
-	xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
+
+	/*
+	 * These zones warrant special memory allocator hints
+	 */
+	xfs_inode_zone =
+		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
+					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
+					KM_ZONE_SPREAD, NULL);
+	xfs_ili_zone =
+		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
+					KM_ZONE_SPREAD, NULL);
+	xfs_chashlist_zone =
+		kmem_zone_init_flags(sizeof(xfs_chashlist_t), "xfs_chashlist",
+					KM_ZONE_SPREAD, NULL);
 
 	/*
 	 * Allocate global trace buffers.
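
With the kmem.h definitions above, the new "allocator hint" flags expand to
plain slab flags: KM_ZONE_HWALIGN is SLAB_HWCACHE_ALIGN, KM_ZONE_RECLAIM is
SLAB_RECLAIM_ACCOUNT, and KM_ZONE_SPREAD is defined to 0, apparently as a
placeholder until a corresponding slab flag exists. So the xfs_inode zone
created above is, for now, equivalent to this direct call (shown for
illustration only):

    xfs_inode_zone = kmem_cache_create("xfs_inode", sizeof(xfs_inode_t), 0,
                                       SLAB_HWCACHE_ALIGN |
                                       SLAB_RECLAIM_ACCOUNT,
                                       NULL, NULL);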