ext4: each filesystem creates and uses its own mb_cache
This patch adds new interfaces to create and destroy a cache, ext4_xattr_create_cache() and ext4_xattr_destroy_cache(), and removes the cache creation and destruction calls from ext4_init_xattr() and ext4_exit_xattr() in fs/ext4/xattr.c. fs/ext4/super.c has been changed so that when a filesystem is mounted a cache is allocated and attached to its ext4_sb_info structure. fs/mbcache.c has been changed so that only one slab allocator is allocated and shared by all mb_cache structures.

Signed-off-by: T. Makphaibulchoke <tmac@hp.com>
Parent: 1f3e55fe02
Commit: 9c191f701c
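In outline, the lifecycle the patch introduces is shown below. This is only a simplified sketch assembled from the fs/ext4/super.c hunks that follow (the surrounding mount error paths and the failed_mount_wq label are elided), not a standalone compilable unit:

	/* at mount time, in ext4_fill_super(): one mb_cache per filesystem */
	sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
	if (!sbi->s_mb_cache) {
		ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
		goto failed_mount_wq;
	}

	/* at unmount time, in ext4_put_super(): tear the cache down again */
	if (sbi->s_mb_cache) {
		ext4_xattr_destroy_cache(sbi->s_mb_cache);
		sbi->s_mb_cache = NULL;
	}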
fs/ext4/ext4.h

@@ -1329,6 +1329,7 @@ struct ext4_sb_info {
 	struct list_head s_es_lru;
 	unsigned long s_es_last_sorted;
 	struct percpu_counter s_extent_cache_cnt;
+	struct mb_cache *s_mb_cache;
 	spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
 
 	/* Ratelimit ext4 messages. */

fs/ext4/super.c

@@ -59,6 +59,7 @@ static struct kset *ext4_kset;
 static struct ext4_lazy_init *ext4_li_info;
 static struct mutex ext4_li_mtx;
 static struct ext4_features *ext4_feat;
+static int ext4_mballoc_ready;
 
 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
 			     unsigned long journal_devnum);

@@ -845,6 +846,10 @@ static void ext4_put_super(struct super_block *sb)
 		invalidate_bdev(sbi->journal_bdev);
 		ext4_blkdev_remove(sbi);
 	}
+	if (sbi->s_mb_cache) {
+		ext4_xattr_destroy_cache(sbi->s_mb_cache);
+		sbi->s_mb_cache = NULL;
+	}
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
 	sb->s_fs_info = NULL;

@@ -4010,6 +4015,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	percpu_counter_set(&sbi->s_dirtyclusters_counter, 0);
 
 no_journal:
+	if (ext4_mballoc_ready) {
+		sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
+		if (!sbi->s_mb_cache) {
+			ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
+			goto failed_mount_wq;
+		}
+	}
+
 	/*
 	 * Get the # of file system overhead blocks from the
 	 * superblock if present.

@@ -5518,12 +5531,10 @@ static int __init ext4_init_fs(void)
 		goto out4;
 
 	err = ext4_init_mballoc();
 	if (err)
-		goto out3;
-
-	err = ext4_init_xattr();
-	if (err)
 		goto out2;
+	else
+		ext4_mballoc_ready = 1;
 	err = init_inodecache();
 	if (err)
 		goto out1;

@@ -5539,10 +5550,9 @@ out:
 	unregister_as_ext3();
 	destroy_inodecache();
 out1:
-	ext4_exit_xattr();
-out2:
+	ext4_mballoc_ready = 0;
 	ext4_exit_mballoc();
-out3:
+out2:
 	ext4_exit_feat_adverts();
 out4:
 	if (ext4_proc_root)

@@ -5565,7 +5575,6 @@ static void __exit ext4_exit_fs(void)
 	unregister_as_ext3();
 	unregister_filesystem(&ext4_fs_type);
 	destroy_inodecache();
-	ext4_exit_xattr();
 	ext4_exit_mballoc();
 	ext4_exit_feat_adverts();
 	remove_proc_entry("fs/ext4", NULL);

fs/ext4/xattr.c

@@ -81,7 +81,7 @@
 # define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
 #endif
 
-static void ext4_xattr_cache_insert(struct buffer_head *);
+static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
 static struct buffer_head *ext4_xattr_cache_find(struct inode *,
 						 struct ext4_xattr_header *,
 						 struct mb_cache_entry **);

@@ -90,8 +90,6 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *,
 static int ext4_xattr_list(struct dentry *dentry, char *buffer,
 			   size_t buffer_size);
 
-static struct mb_cache *ext4_xattr_cache;
-
 static const struct xattr_handler *ext4_xattr_handler_map[] = {
 	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
 #ifdef CONFIG_EXT4_FS_POSIX_ACL

@@ -117,6 +115,9 @@ const struct xattr_handler *ext4_xattr_handlers[] = {
 	NULL
 };
 
+#define EXT4_GET_MB_CACHE(inode)	(((struct ext4_sb_info *) \
+				inode->i_sb->s_fs_info)->s_mb_cache)
+
 static __le32 ext4_xattr_block_csum(struct inode *inode,
 				    sector_t block_nr,
 				    struct ext4_xattr_header *hdr)

@@ -265,6 +266,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
 	struct ext4_xattr_entry *entry;
 	size_t size;
 	int error;
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
 		  name_index, name, buffer, (long)buffer_size);

@@ -286,7 +288,7 @@ bad_block:
 		error = -EIO;
 		goto cleanup;
 	}
-	ext4_xattr_cache_insert(bh);
+	ext4_xattr_cache_insert(ext4_mb_cache, bh);
 	entry = BFIRST(bh);
 	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
 	if (error == -EIO)

@@ -409,6 +411,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 	struct inode *inode = dentry->d_inode;
 	struct buffer_head *bh = NULL;
 	int error;
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
 		  buffer, (long)buffer_size);

@@ -430,7 +433,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 		error = -EIO;
 		goto cleanup;
 	}
-	ext4_xattr_cache_insert(bh);
+	ext4_xattr_cache_insert(ext4_mb_cache, bh);
 	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
 
 cleanup:

@@ -526,8 +529,9 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 {
 	struct mb_cache_entry *ce = NULL;
 	int error = 0;
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
-	ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
+	ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
 	error = ext4_journal_get_write_access(handle, bh);
 	if (error)
 		goto out;

@@ -746,13 +750,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 	struct ext4_xattr_search *s = &bs->s;
 	struct mb_cache_entry *ce = NULL;
 	int error = 0;
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 #define header(x) ((struct ext4_xattr_header *)(x))
 
 	if (i->value && i->value_len > sb->s_blocksize)
 		return -ENOSPC;
 	if (s->base) {
-		ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
+		ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
 					bs->bh->b_blocknr);
 		error = ext4_journal_get_write_access(handle, bs->bh);
 		if (error)

@@ -770,7 +775,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			if (!IS_LAST_ENTRY(s->first))
 				ext4_xattr_rehash(header(s->base),
 						  s->here);
-			ext4_xattr_cache_insert(bs->bh);
+			ext4_xattr_cache_insert(ext4_mb_cache,
+						bs->bh);
 		}
 		unlock_buffer(bs->bh);
 		if (error == -EIO)

@@ -906,7 +912,7 @@ getblk_failed:
 			memcpy(new_bh->b_data, s->base, new_bh->b_size);
 			set_buffer_uptodate(new_bh);
 			unlock_buffer(new_bh);
-			ext4_xattr_cache_insert(new_bh);
+			ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
 			error = ext4_handle_dirty_xattr_block(handle,
 							      inode, new_bh);
 			if (error)

@@ -1495,13 +1501,13 @@ ext4_xattr_put_super(struct super_block *sb)
  * Returns 0, or a negative error number on failure.
  */
 static void
-ext4_xattr_cache_insert(struct buffer_head *bh)
+ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
 {
 	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
+	ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
 	if (!ce) {
 		ea_bdebug(bh, "out of memory");
 		return;

@@ -1573,12 +1579,13 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
 {
 	__u32 hash = le32_to_cpu(header->h_hash);
 	struct mb_cache_entry *ce;
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	if (!header->h_hash)
 		return NULL;  /* never share */
 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
-	ce = mb_cache_entry_find_first(ext4_xattr_cache, inode->i_sb->s_bdev,
+	ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev,
 				       hash);
 	while (ce) {
 		struct buffer_head *bh;

@@ -1676,19 +1683,17 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *header,
 
 #undef BLOCK_HASH_SHIFT
 
-int __init
-ext4_init_xattr(void)
+#define	HASH_BUCKET_BITS	10
+
+struct mb_cache *
+ext4_xattr_create_cache(char *name)
 {
-	ext4_xattr_cache = mb_cache_create("ext4_xattr", 6);
-	if (!ext4_xattr_cache)
-		return -ENOMEM;
-	return 0;
+	return mb_cache_create(name, HASH_BUCKET_BITS);
 }
 
-void
-ext4_exit_xattr(void)
+void ext4_xattr_destroy_cache(struct mb_cache *cache)
 {
-	if (ext4_xattr_cache)
-		mb_cache_destroy(ext4_xattr_cache);
-	ext4_xattr_cache = NULL;
+	if (cache)
+		mb_cache_destroy(cache);
 }
 

|
|
@ -110,9 +110,6 @@ extern void ext4_xattr_put_super(struct super_block *);
|
|||
extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
|
||||
struct ext4_inode *raw_inode, handle_t *handle);
|
||||
|
||||
extern int __init ext4_init_xattr(void);
|
||||
extern void ext4_exit_xattr(void);
|
||||
|
||||
extern const struct xattr_handler *ext4_xattr_handlers[];
|
||||
|
||||
extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
|
||||
|
@ -124,6 +121,9 @@ extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
|
|||
struct ext4_xattr_info *i,
|
||||
struct ext4_xattr_ibody_find *is);
|
||||
|
||||
extern struct mb_cache *ext4_xattr_create_cache(char *name);
|
||||
extern void ext4_xattr_destroy_cache(struct mb_cache *);
|
||||
|
||||
#ifdef CONFIG_EXT4_FS_SECURITY
|
||||
extern int ext4_init_security(handle_t *handle, struct inode *inode,
|
||||
struct inode *dir, const struct qstr *qstr);
|
||||
|
|
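The xattr.c changes above replace the file-scope ext4_xattr_cache global with the per-super-block pointer added to ext4_sb_info; every caller now derives the cache from the inode through the EXT4_GET_MB_CACHE() macro. For illustration only, a hypothetical inline helper with the same effect could look like this (ext4_mb_cache_of() is not part of the patch; EXT4_SB() is ext4's existing accessor for sb->s_fs_info):

	/* illustration only: equivalent to the EXT4_GET_MB_CACHE() macro above */
	static inline struct mb_cache *ext4_mb_cache_of(struct inode *inode)
	{
		return EXT4_SB(inode->i_sb)->s_mb_cache;
	}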
fs/mbcache.c (18 changed lines)

@@ -99,6 +99,7 @@
 
 static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
 static struct blockgroup_lock *mb_cache_bg_lock;
+static struct kmem_cache *mb_cache_kmem_cache;
 
 MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
 MODULE_DESCRIPTION("Meta block cache (for extended attributes)");

@@ -351,11 +352,14 @@ mb_cache_create(const char *name, int bucket_bits)
 		goto fail;
 	for (n=0; n<bucket_count; n++)
 		INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
-	cache->c_entry_cache = kmem_cache_create(name,
-		sizeof(struct mb_cache_entry), 0,
-		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
-	if (!cache->c_entry_cache)
-		goto fail2;
+	if (!mb_cache_kmem_cache) {
+		mb_cache_kmem_cache = kmem_cache_create(name,
+			sizeof(struct mb_cache_entry), 0,
+			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
+		if (!mb_cache_kmem_cache)
+			goto fail2;
+	}
+	cache->c_entry_cache = mb_cache_kmem_cache;
 
 	/*
 	 * Set an upper limit on the number of cache entries so that the hash

@@ -476,6 +480,10 @@ mb_cache_destroy(struct mb_cache *cache)
 			  atomic_read(&cache->c_entry_count));
 	}
 
+	if (list_empty(&mb_cache_list)) {
+		kmem_cache_destroy(mb_cache_kmem_cache);
+		mb_cache_kmem_cache = NULL;
+	}
 	kfree(cache->c_index_hash);
 	kfree(cache->c_block_hash);
 	kfree(cache);