mm, memcg: create mem_cgroup_from_seq
This is the start of a series of patches similar to my earlier
DEFINE_MEMCG_MAX_OR_VAL work, but with less Macro Magic(tm).

There are a bunch of places we go from seq_file to mem_cgroup, which
currently requires manually getting the css, then getting the mem_cgroup
from the css.  It's in enough places now that having mem_cgroup_from_seq
makes sense (and also makes the next patch a bit nicer).

Link: http://lkml.kernel.org/r/20190124194050.GA31341@chrisdown.name
Signed-off-by: Chris Down <chris@chrisdown.name>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: dc50537bdd
Commit: aa9694bb78
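For context, here is a minimal sketch of the conversion this helper enables in a cgroup v2 seq_file show handler. The handler name memory_example_show is hypothetical and only for illustration; the helper itself and the mem_cgroup_from_css(seq_css(m)) pattern it replaces are from the patch below.

  /* Before this patch: each handler open-codes the seq_file -> css -> memcg hop. */
  static int memory_example_show(struct seq_file *m, void *v)
  {
  	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

  	seq_printf(m, "%lu\n", (unsigned long)page_counter_read(&memcg->memory));
  	return 0;
  }

  /* After this patch: the helper hides the intermediate css lookup. */
  static int memory_example_show(struct seq_file *m, void *v)
  {
  	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

  	seq_printf(m, "%lu\n", (unsigned long)page_counter_read(&memcg->memory));
  	return 0;
  }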
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -429,6 +429,11 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
 }
 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
 
+static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
+{
+	return mem_cgroup_from_css(seq_css(m));
+}
+
 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
 {
 	struct mem_cgroup_per_node *mz;
@@ -937,6 +942,11 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 	return NULL;
 }
 
+static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
+{
+	return NULL;
+}
+
 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
 {
 	return NULL;
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3337,7 +3337,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
 	const struct numa_stat *stat;
 	int nid;
 	unsigned long nr;
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 
 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
 		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
@@ -3388,7 +3388,7 @@ static const char *const memcg1_event_names[] = {
 
 static int memcg_stat_show(struct seq_file *m, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 	unsigned long memory, memsw;
 	struct mem_cgroup *mi;
 	unsigned int i;
@@ -3820,7 +3820,7 @@ static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
 
 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
 
 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
@@ -5363,7 +5363,7 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
 
 static int memory_min_show(struct seq_file *m, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 	unsigned long min = READ_ONCE(memcg->memory.min);
 
 	if (min == PAGE_COUNTER_MAX)
@@ -5393,7 +5393,7 @@ static ssize_t memory_min_write(struct kernfs_open_file *of,
 
 static int memory_low_show(struct seq_file *m, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 	unsigned long low = READ_ONCE(memcg->memory.low);
 
 	if (low == PAGE_COUNTER_MAX)
@@ -5423,7 +5423,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
 
 static int memory_high_show(struct seq_file *m, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 	unsigned long high = READ_ONCE(memcg->high);
 
 	if (high == PAGE_COUNTER_MAX)
@@ -5460,7 +5460,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
 
 static int memory_max_show(struct seq_file *m, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 	unsigned long max = READ_ONCE(memcg->memory.max);
 
 	if (max == PAGE_COUNTER_MAX)
@@ -5522,7 +5522,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
 
 static int memory_events_show(struct seq_file *m, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 
 	seq_printf(m, "low %lu\n",
 		   atomic_long_read(&memcg->memory_events[MEMCG_LOW]));
@@ -5540,7 +5540,7 @@ static int memory_events_show(struct seq_file *m, void *v)
 
 static int memory_stat_show(struct seq_file *m, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 	struct accumulated_stats acc;
 	int i;
 
@@ -5617,7 +5617,7 @@ static int memory_stat_show(struct seq_file *m, void *v)
 
 static int memory_oom_group_show(struct seq_file *m, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 
 	seq_printf(m, "%d\n", memcg->oom_group);
 
@@ -6600,7 +6600,7 @@ static u64 swap_current_read(struct cgroup_subsys_state *css,
 
 static int swap_max_show(struct seq_file *m, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 	unsigned long max = READ_ONCE(memcg->swap.max);
 
 	if (max == PAGE_COUNTER_MAX)
@@ -6630,7 +6630,7 @@ static ssize_t swap_max_write(struct kernfs_open_file *of,
 
 static int swap_events_show(struct seq_file *m, void *v)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 
 	seq_printf(m, "max %lu\n",
 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1425,7 +1425,7 @@ void dump_unreclaimable_slab(void)
 #if defined(CONFIG_MEMCG)
 void *memcg_slab_start(struct seq_file *m, loff_t *pos)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 
 	mutex_lock(&slab_mutex);
 	return seq_list_start(&memcg->kmem_caches, *pos);
@@ -1433,7 +1433,7 @@ void *memcg_slab_start(struct seq_file *m, loff_t *pos)
 
 void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
 {
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 
 	return seq_list_next(p, &memcg->kmem_caches, pos);
 }
@@ -1447,7 +1447,7 @@ int memcg_slab_show(struct seq_file *m, void *p)
 {
 	struct kmem_cache *s = list_entry(p, struct kmem_cache,
 					  memcg_params.kmem_caches_node);
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 
 	if (p == memcg->kmem_caches.next)
 		print_slabinfo_header(m);