staging/lustre/obdclass: convert lu_object shrinker to count/scan API

Convert the lu_object shrinker to the new count/scan API.

Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

parent cbc3769ecd
commit fe92a0557a
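For context: the old shrinker API exposed a single callback that both reported cache size and reclaimed from it, selected by nr_to_scan. The count/scan API splits this into count_objects(), a cheap estimate of how many objects could be freed, and scan_objects(), which does the actual reclaim and returns the number freed or SHRINK_STOP. A minimal sketch of the new shape, assuming a hypothetical my_cache whose accounting is stood in for by a simple atomic counter (the my_cache_* names are illustrative, not part of this patch):

#include <linux/atomic.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/shrinker.h>

/* Hypothetical object counter standing in for real cache accounting. */
static atomic_long_t my_cache_nr_objects = ATOMIC_LONG_INIT(0);

/* count_objects: cheap estimate of freeable objects; returning 0 tells
 * the VM there is nothing to reclaim from this cache right now. */
static unsigned long my_cache_count(struct shrinker *shrink,
                                    struct shrink_control *sc)
{
        return atomic_long_read(&my_cache_nr_objects);
}

/* scan_objects: free up to sc->nr_to_scan objects and report how many
 * went away, or SHRINK_STOP when reclaim cannot proceed in this context. */
static unsigned long my_cache_scan(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        unsigned long nr;

        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;     /* wrong context, try again later */

        nr = min_t(unsigned long, sc->nr_to_scan,
                   atomic_long_read(&my_cache_nr_objects));
        atomic_long_sub(nr, &my_cache_nr_objects);      /* pretend-free */
        return nr;
}

static struct shrinker my_cache_shrinker = {
        .count_objects  = my_cache_count,
        .scan_objects   = my_cache_scan,
        .seeks          = DEFAULT_SEEKS,
};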
@@ -1779,7 +1779,6 @@ int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
 }
 EXPORT_SYMBOL(lu_env_refill_by_tags);
 
-static struct shrinker *lu_site_shrinker = NULL;
 
 typedef struct lu_site_stats{
 	unsigned	lss_populated;
@@ -1835,59 +1834,66 @@ static void lu_site_stats_get(cfs_hash_t *hs,
  * objects without taking the lu_sites_guard lock, but this is not
  * possible in the current implementation.
  */
-static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long lu_cache_shrink_count(struct shrinker *sk,
+					   struct shrink_control *sc)
 {
 	lu_site_stats_t stats;
 	struct lu_site *s;
 	struct lu_site *tmp;
-	int cached = 0;
-	int remain = shrink_param(sc, nr_to_scan);
-	LIST_HEAD(splice);
+	unsigned long cached = 0;
 
-	if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
-		if (remain != 0)
-			return -1;
-		else
-			/* We must not take the lu_sites_guard lock when
-			 * __GFP_FS is *not* set because of the deadlock
-			 * possibility detailed above. Additionally,
-			 * since we cannot determine the number of
-			 * objects in the cache without taking this
-			 * lock, we're in a particularly tough spot. As
-			 * a result, we'll just lie and say our cache is
-			 * empty. This _should_ be ok, as we can't
-			 * reclaim objects when __GFP_FS is *not* set
-			 * anyways.
-			 */
-			return 0;
-	}
-
-	CDEBUG(D_INODE, "Shrink %d objects\n", remain);
+	if (!(sc->gfp_mask & __GFP_FS))
+		return 0;
 
 	mutex_lock(&lu_sites_guard);
 	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
-		if (shrink_param(sc, nr_to_scan) != 0) {
-			remain = lu_site_purge(&lu_shrink_env, s, remain);
-			/*
-			 * Move just shrunk site to the tail of site list to
-			 * assure shrinking fairness.
-			 */
-			list_move_tail(&s->ls_linkage, &splice);
-		}
-
 		memset(&stats, 0, sizeof(stats));
 		lu_site_stats_get(s->ls_obj_hash, &stats, 0);
 		cached += stats.lss_total - stats.lss_busy;
-		if (shrink_param(sc, nr_to_scan) && remain <= 0)
-			break;
 	}
+	mutex_unlock(&lu_sites_guard);
+
+	cached = (cached / 100) * sysctl_vfs_cache_pressure;
+	CDEBUG(D_INODE, "%ld objects cached\n", cached);
+	return cached;
+}
+
+static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
+					  struct shrink_control *sc)
+{
+	struct lu_site *s;
+	struct lu_site *tmp;
+	unsigned long remain = sc->nr_to_scan, freed = 0;
+	LIST_HEAD(splice);
+
+	if (!(sc->gfp_mask & __GFP_FS))
+		/* We must not take the lu_sites_guard lock when
+		 * __GFP_FS is *not* set because of the deadlock
+		 * possibility detailed above. Additionally,
+		 * since we cannot determine the number of
+		 * objects in the cache without taking this
+		 * lock, we're in a particularly tough spot. As
+		 * a result, we'll just lie and say our cache is
+		 * empty. This _should_ be ok, as we can't
+		 * reclaim objects when __GFP_FS is *not* set
+		 * anyways.
+		 */
+		return SHRINK_STOP;
+
+	mutex_lock(&lu_sites_guard);
+	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+		freed = lu_site_purge(&lu_shrink_env, s, remain);
+		remain -= freed;
+		/*
+		 * Move just shrunk site to the tail of site list to
+		 * assure shrinking fairness.
+		 */
+		list_move_tail(&s->ls_linkage, &splice);
+	}
 	list_splice(&splice, lu_sites.prev);
 	mutex_unlock(&lu_sites_guard);
 
-	cached = (cached / 100) * sysctl_vfs_cache_pressure;
-	if (shrink_param(sc, nr_to_scan) == 0)
-		CDEBUG(D_INODE, "%d objects cached\n", cached);
-	return cached;
+	return sc->nr_to_scan - remain;
 }
 
 /*
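A note on the count side above: lu_cache_shrink_count() scales the raw freeable count by sysctl_vfs_cache_pressure before reporting it, so the sysctl biases how aggressively the VM targets this cache, while the scan side now signals an unusable reclaim context with SHRINK_STOP instead of the old -1. A small arithmetic sketch of the scaling (the helper name and numbers are illustrative, not from the patch):

/* Mirrors the scaling in lu_cache_shrink_count(); "pressure" stands in
 * for sysctl_vfs_cache_pressure (default 100). */
static unsigned long scaled_count(unsigned long cached, unsigned int pressure)
{
        return (cached / 100) * pressure;
}

/*
 * scaled_count(10000, 100) == 10000  report everything (default)
 * scaled_count(10000,  50) ==  5000  cache looks half as reclaimable
 * scaled_count(10000, 200) == 20000  invite more aggressive reclaim
 */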
@@ -1913,6 +1919,12 @@ int lu_printk_printer(const struct lu_env *env,
 	return 0;
 }
 
+static struct shrinker lu_site_shrinker = {
+	.count_objects	= lu_cache_shrink_count,
+	.scan_objects	= lu_cache_shrink_scan,
+	.seeks		= DEFAULT_SEEKS,
+};
+
 /**
  * Initialization of global lu_* data.
  */
@@ -1947,9 +1959,7 @@ int lu_global_init(void)
 	 * inode, one for ea. Unfortunately setting this high value results in
 	 * lu_object/inode cache consuming all the memory.
 	 */
-	lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
-	if (lu_site_shrinker == NULL)
-		return -ENOMEM;
+	register_shrinker(&lu_site_shrinker);
 
 	return result;
 }
|
@@ -1959,11 +1969,7 @@ int lu_global_init(void)
  */
 void lu_global_fini(void)
 {
-	if (lu_site_shrinker != NULL) {
-		remove_shrinker(lu_site_shrinker);
-		lu_site_shrinker = NULL;
-	}
-
+	unregister_shrinker(&lu_site_shrinker);
 	lu_context_key_degister(&lu_global_key);
 
 	/*
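The registration change follows the same pattern: the shrinker becomes a static struct, so the set_shrinker()/remove_shrinker() pointer bookkeeping and NULL checks collapse into plain register_shrinker()/unregister_shrinker() calls. A minimal module skeleton showing the pairing, reusing the hypothetical my_cache_shrinker sketched earlier (whether register_shrinker() returns a value worth checking depends on kernel version, so it is ignored here as in the patch):

#include <linux/module.h>
#include <linux/shrinker.h>

static int __init my_cache_init(void)
{
        register_shrinker(&my_cache_shrinker);
        return 0;
}

static void __exit my_cache_exit(void)
{
        unregister_shrinker(&my_cache_shrinker);
}

module_init(my_cache_init);
module_exit(my_cache_exit);
MODULE_LICENSE("GPL");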