NFS: Clean up nfs_access_zap_cache()

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Trond Myklebust committed on 2010-05-13 12:51:06 -04:00
Parent: 61d5eb2985
Commit: 1a81bb8a1f
1 changed file with 31 additions and 29 deletions

fs/nfs/dir.c

@@ -1697,6 +1697,17 @@ static void nfs_access_free_entry(struct nfs_access_entry *entry)
 	smp_mb__after_atomic_dec();
 }
 
+static void nfs_access_free_list(struct list_head *head)
+{
+	struct nfs_access_entry *cache;
+
+	while (!list_empty(head)) {
+		cache = list_entry(head->next, struct nfs_access_entry, lru);
+		list_del(&cache->lru);
+		nfs_access_free_entry(cache);
+	}
+}
+
 int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(head);
@@ -1743,52 +1754,41 @@ remove_lru_entry:
 		goto restart;
 	}
 	spin_unlock(&nfs_access_lru_lock);
-	while (!list_empty(&head)) {
-		cache = list_entry(head.next, struct nfs_access_entry, lru);
-		list_del(&cache->lru);
-		nfs_access_free_entry(cache);
-	}
+	nfs_access_free_list(&head);
 	return (atomic_long_read(&nfs_access_nr_entries) / 100) * sysctl_vfs_cache_pressure;
 }
 
-static void __nfs_access_zap_cache(struct inode *inode)
+static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
 {
-	struct nfs_inode *nfsi = NFS_I(inode);
 	struct rb_root *root_node = &nfsi->access_cache;
-	struct rb_node *n, *dispose = NULL;
+	struct rb_node *n;
 	struct nfs_access_entry *entry;
 
 	/* Unhook entries from the cache */
 	while ((n = rb_first(root_node)) != NULL) {
 		entry = rb_entry(n, struct nfs_access_entry, rb_node);
 		rb_erase(n, root_node);
-		list_del(&entry->lru);
-		n->rb_left = dispose;
-		dispose = n;
+		list_move(&entry->lru, head);
 	}
 	nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS;
-	spin_unlock(&inode->i_lock);
-
-	/* Now kill them all! */
-	while (dispose != NULL) {
-		n = dispose;
-		dispose = n->rb_left;
-		nfs_access_free_entry(rb_entry(n, struct nfs_access_entry, rb_node));
-	}
 }
 
 void nfs_access_zap_cache(struct inode *inode)
 {
+	LIST_HEAD(head);
+
+	if (test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags) == 0)
+		return;
 	/* Remove from global LRU init */
-	if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
-		spin_lock(&nfs_access_lru_lock);
+	spin_lock(&nfs_access_lru_lock);
+	if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
 		list_del_init(&NFS_I(inode)->access_cache_inode_lru);
-		spin_unlock(&nfs_access_lru_lock);
-	}
 
 	spin_lock(&inode->i_lock);
-	/* This will release the spinlock */
-	__nfs_access_zap_cache(inode);
+	__nfs_access_zap_cache(NFS_I(inode), &head);
+	spin_unlock(&inode->i_lock);
+	spin_unlock(&nfs_access_lru_lock);
+	nfs_access_free_list(&head);
 }
 
 static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, struct rpc_cred *cred)
@@ -1839,8 +1839,8 @@ out_stale:
 	nfs_access_free_entry(cache);
 	return -ENOENT;
 out_zap:
-	/* This will release the spinlock */
-	__nfs_access_zap_cache(inode);
+	spin_unlock(&inode->i_lock);
+	nfs_access_zap_cache(inode);
 	return -ENOENT;
 }
 
@@ -1895,9 +1895,11 @@ static void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *s
 	smp_mb__after_atomic_inc();
 
 	/* Add inode to global LRU list */
-	if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
+	if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
 		spin_lock(&nfs_access_lru_lock);
-		list_add_tail(&NFS_I(inode)->access_cache_inode_lru, &nfs_access_lru_list);
+		if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
+			list_add_tail(&NFS_I(inode)->access_cache_inode_lru,
+					&nfs_access_lru_list);
 		spin_unlock(&nfs_access_lru_lock);
 	}
 }
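
The heart of the cleanup is the "unhook under the lock, free after dropping it" pattern that nfs_access_free_list() factors out: entries are moved onto a private list while the locks are held, and the actual freeing happens with no lock held. As a rough userspace illustration only (not part of the patch; every name below is invented for the sketch), the same idea looks like this in plain C:

/*
 * Illustrative userspace sketch -- mimics the shape of the change:
 * detach everything from the shared cache while holding the lock,
 * then free the private list after the lock is dropped.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int id;
};

static struct entry *cache_head;	/* protected by cache_lock */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static void cache_add(int id)
{
	struct entry *e = malloc(sizeof(*e));

	if (e == NULL)
		return;
	e->id = id;
	pthread_mutex_lock(&cache_lock);
	e->next = cache_head;
	cache_head = e;
	pthread_mutex_unlock(&cache_lock);
}

/* Counterpart of nfs_access_free_list(): frees an already-detached list. */
static void cache_free_list(struct entry *head)
{
	while (head != NULL) {
		struct entry *e = head;

		head = e->next;
		printf("freeing entry %d\n", e->id);
		free(e);
	}
}

/* Counterpart of nfs_access_zap_cache(): unhook under the lock, free outside. */
static void cache_zap(void)
{
	struct entry *head;

	pthread_mutex_lock(&cache_lock);
	head = cache_head;		/* detach the whole cache */
	cache_head = NULL;
	pthread_mutex_unlock(&cache_lock);

	cache_free_list(head);		/* no lock held while freeing */
}

int main(void)
{
	cache_add(1);
	cache_add(2);
	cache_add(3);
	cache_zap();
	return 0;
}

Deferring the frees until after the unlock keeps the critical section short and lets __nfs_access_zap_cache() stop playing tricks with rb_node pointers and implicit lock releases, which is what made the old code hard to follow.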