cgroup: consistently use @cset for struct css_set variables

cgroup.c uses @cg for most struct css_set variables, which in itself
could be a bit confusing, but made much worse by the fact that there
are places which use @cg for struct cgroup variables.
compare_css_sets() epitomizes this confusion - @[old_]cg are struct
css_set while @cg[12] are struct cgroup.

It's not like the whole deal with cgroup, css_set and cg_cgroup_link
isn't already confusing enough.  Let's give it some sanity by
uniformly using @cset for all struct css_set variables.

* s/cg/cset/ for all css_set variables.

* s/oldcg/old_cset/ s/oldcgrp/old_cgrp/.  The same for the ones
  prefixed with "new".

* s/cg/cgrp/ for cgroup variables in compare_css_sets().

* s/css/cset/ for the cgroup variable in task_cgroup_from_root().

* Whitespace (blank line) adjustments.

This patch is purely cosmetic.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
This commit is contained in:
Tejun Heo 2013-06-12 21:04:49 -07:00
Parent 3fc3db9a3a
Commit 5abb885573
1 changed file: 109 additions and 107 deletions

View file

@@ -376,30 +376,32 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
* compiled into their kernel but not actually in use */
static int use_task_css_set_links __read_mostly;
static void __put_css_set(struct css_set *cg, int taskexit)
static void __put_css_set(struct css_set *cset, int taskexit)
{
struct cg_cgroup_link *link;
struct cg_cgroup_link *saved_link;
/*
* Ensure that the refcount doesn't hit zero while any readers
* can see it. Similar to atomic_dec_and_lock(), but for an
* rwlock
*/
if (atomic_add_unless(&cg->refcount, -1, 1))
if (atomic_add_unless(&cset->refcount, -1, 1))
return;
write_lock(&css_set_lock);
if (!atomic_dec_and_test(&cg->refcount)) {
if (!atomic_dec_and_test(&cset->refcount)) {
write_unlock(&css_set_lock);
return;
}
/* This css_set is dead. unlink it and release cgroup refcounts */
hash_del(&cg->hlist);
hash_del(&cset->hlist);
css_set_count--;
list_for_each_entry_safe(link, saved_link, &cg->cg_links,
list_for_each_entry_safe(link, saved_link, &cset->cg_links,
cg_link_list) {
struct cgroup *cgrp = link->cgrp;
list_del(&link->cg_link_list);
list_del(&link->cgrp_link_list);
@@ -421,45 +423,45 @@ static void __put_css_set(struct css_set *cg, int taskexit)
}
write_unlock(&css_set_lock);
kfree_rcu(cg, rcu_head);
kfree_rcu(cset, rcu_head);
}
/*
* refcounted get/put for css_set objects
*/
static inline void get_css_set(struct css_set *cg)
static inline void get_css_set(struct css_set *cset)
{
atomic_inc(&cg->refcount);
atomic_inc(&cset->refcount);
}
static inline void put_css_set(struct css_set *cg)
static inline void put_css_set(struct css_set *cset)
{
__put_css_set(cg, 0);
__put_css_set(cset, 0);
}
static inline void put_css_set_taskexit(struct css_set *cg)
static inline void put_css_set_taskexit(struct css_set *cset)
{
__put_css_set(cg, 1);
__put_css_set(cset, 1);
}
/*
* compare_css_sets - helper function for find_existing_css_set().
* @cg: candidate css_set being tested
* @old_cg: existing css_set for a task
* @cset: candidate css_set being tested
* @old_cset: existing css_set for a task
* @new_cgrp: cgroup that's being entered by the task
* @template: desired set of css pointers in css_set (pre-calculated)
*
* Returns true if "cg" matches "old_cg" except for the hierarchy
* which "new_cgrp" belongs to, for which it should match "new_cgrp".
*/
static bool compare_css_sets(struct css_set *cg,
struct css_set *old_cg,
static bool compare_css_sets(struct css_set *cset,
struct css_set *old_cset,
struct cgroup *new_cgrp,
struct cgroup_subsys_state *template[])
{
struct list_head *l1, *l2;
if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
if (memcmp(template, cset->subsys, sizeof(cset->subsys))) {
/* Not all subsystems matched */
return false;
}
@@ -473,28 +475,28 @@ static bool compare_css_sets(struct css_set *cg,
* candidates.
*/
l1 = &cg->cg_links;
l2 = &old_cg->cg_links;
l1 = &cset->cg_links;
l2 = &old_cset->cg_links;
while (1) {
struct cg_cgroup_link *cgl1, *cgl2;
struct cgroup *cg1, *cg2;
struct cgroup *cgrp1, *cgrp2;
l1 = l1->next;
l2 = l2->next;
/* See if we reached the end - both lists are equal length. */
if (l1 == &cg->cg_links) {
BUG_ON(l2 != &old_cg->cg_links);
if (l1 == &cset->cg_links) {
BUG_ON(l2 != &old_cset->cg_links);
break;
} else {
BUG_ON(l2 == &old_cg->cg_links);
BUG_ON(l2 == &old_cset->cg_links);
}
/* Locate the cgroups associated with these links. */
cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
cg1 = cgl1->cgrp;
cg2 = cgl2->cgrp;
cgrp1 = cgl1->cgrp;
cgrp2 = cgl2->cgrp;
/* Hierarchies should be linked in the same order. */
BUG_ON(cg1->root != cg2->root);
BUG_ON(cgrp1->root != cgrp2->root);
/*
* If this hierarchy is the hierarchy of the cgroup
@@ -503,11 +505,11 @@ static bool compare_css_sets(struct css_set *cg,
* hierarchy, then this css_set should point to the
* same cgroup as the old css_set.
*/
if (cg1->root == new_cgrp->root) {
if (cg1 != new_cgrp)
if (cgrp1->root == new_cgrp->root) {
if (cgrp1 != new_cgrp)
return false;
} else {
if (cg1 != cg2)
if (cgrp1 != cgrp2)
return false;
}
}
@@ -527,14 +529,13 @@ static bool compare_css_sets(struct css_set *cg,
* template: location in which to build the desired set of subsystem
* state objects for the new cgroup group
*/
static struct css_set *find_existing_css_set(
struct css_set *oldcg,
struct cgroup *cgrp,
struct cgroup_subsys_state *template[])
static struct css_set *find_existing_css_set(struct css_set *old_cset,
struct cgroup *cgrp,
struct cgroup_subsys_state *template[])
{
int i;
struct cgroupfs_root *root = cgrp->root;
struct css_set *cg;
struct css_set *cset;
unsigned long key;
/*
@@ -551,17 +552,17 @@ static struct css_set *find_existing_css_set(
} else {
/* Subsystem is not in this hierarchy, so we
* don't want to change the subsystem state */
template[i] = oldcg->subsys[i];
template[i] = old_cset->subsys[i];
}
}
key = css_set_hash(template);
hash_for_each_possible(css_set_table, cg, hlist, key) {
if (!compare_css_sets(cg, oldcg, cgrp, template))
hash_for_each_possible(css_set_table, cset, hlist, key) {
if (!compare_css_sets(cset, old_cset, cgrp, template))
continue;
/* This css_set matches what we need */
return cg;
return cset;
}
/* No existing cgroup group matched */
@@ -603,18 +604,18 @@ static int allocate_cg_links(int count, struct list_head *tmp)
/**
* link_css_set - a helper function to link a css_set to a cgroup
* @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
* @cg: the css_set to be linked
* @cset: the css_set to be linked
* @cgrp: the destination cgroup
*/
static void link_css_set(struct list_head *tmp_cg_links,
struct css_set *cg, struct cgroup *cgrp)
struct css_set *cset, struct cgroup *cgrp)
{
struct cg_cgroup_link *link;
BUG_ON(list_empty(tmp_cg_links));
link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
cgrp_link_list);
link->cg = cg;
link->cg = cset;
link->cgrp = cgrp;
atomic_inc(&cgrp->count);
list_move(&link->cgrp_link_list, &cgrp->css_sets);
@@ -622,7 +623,7 @@ static void link_css_set(struct list_head *tmp_cg_links,
* Always add links to the tail of the list so that the list
* is sorted by order of hierarchy creation
*/
list_add_tail(&link->cg_link_list, &cg->cg_links);
list_add_tail(&link->cg_link_list, &cset->cg_links);
}
/*
@@ -632,10 +633,10 @@ static void link_css_set(struct list_head *tmp_cg_links,
* substituted into the appropriate hierarchy. Must be called with
* cgroup_mutex held
*/
static struct css_set *find_css_set(
struct css_set *oldcg, struct cgroup *cgrp)
static struct css_set *find_css_set(struct css_set *old_cset,
struct cgroup *cgrp)
{
struct css_set *res;
struct css_set *cset;
struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
struct list_head tmp_cg_links;
@@ -646,40 +647,40 @@ static struct css_set *find_css_set(
/* First see if we already have a cgroup group that matches
* the desired set */
read_lock(&css_set_lock);
res = find_existing_css_set(oldcg, cgrp, template);
if (res)
get_css_set(res);
cset = find_existing_css_set(old_cset, cgrp, template);
if (cset)
get_css_set(cset);
read_unlock(&css_set_lock);
if (res)
return res;
if (cset)
return cset;
res = kmalloc(sizeof(*res), GFP_KERNEL);
if (!res)
cset = kmalloc(sizeof(*cset), GFP_KERNEL);
if (!cset)
return NULL;
/* Allocate all the cg_cgroup_link objects that we'll need */
if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
kfree(res);
kfree(cset);
return NULL;
}
atomic_set(&res->refcount, 1);
INIT_LIST_HEAD(&res->cg_links);
INIT_LIST_HEAD(&res->tasks);
INIT_HLIST_NODE(&res->hlist);
atomic_set(&cset->refcount, 1);
INIT_LIST_HEAD(&cset->cg_links);
INIT_LIST_HEAD(&cset->tasks);
INIT_HLIST_NODE(&cset->hlist);
/* Copy the set of subsystem state objects generated in
* find_existing_css_set() */
memcpy(res->subsys, template, sizeof(res->subsys));
memcpy(cset->subsys, template, sizeof(cset->subsys));
write_lock(&css_set_lock);
/* Add reference counts and links from the new css_set. */
list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
list_for_each_entry(link, &old_cset->cg_links, cg_link_list) {
struct cgroup *c = link->cgrp;
if (c->root == cgrp->root)
c = cgrp;
link_css_set(&tmp_cg_links, res, c);
link_css_set(&tmp_cg_links, cset, c);
}
BUG_ON(!list_empty(&tmp_cg_links));
@@ -687,12 +688,12 @@ static struct css_set *find_css_set(
css_set_count++;
/* Add this cgroup group to the hash table */
key = css_set_hash(res->subsys);
hash_add(css_set_table, &res->hlist, key);
key = css_set_hash(cset->subsys);
hash_add(css_set_table, &cset->hlist, key);
write_unlock(&css_set_lock);
return res;
return cset;
}
/*
@@ -702,7 +703,7 @@ static struct css_set *find_css_set(
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
struct cgroupfs_root *root)
{
struct css_set *css;
struct css_set *cset;
struct cgroup *res = NULL;
BUG_ON(!mutex_is_locked(&cgroup_mutex));
@@ -712,12 +713,12 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
* task can't change groups, so the only thing that can happen
* is that it exits and its css is set back to init_css_set.
*/
css = task->cgroups;
if (css == &init_css_set) {
cset = task->cgroups;
if (cset == &init_css_set) {
res = &root->top_cgroup;
} else {
struct cg_cgroup_link *link;
list_for_each_entry(link, &css->cg_links, cg_link_list) {
list_for_each_entry(link, &cset->cg_links, cg_link_list) {
struct cgroup *c = link->cgrp;
if (c->root == root) {
res = c;
@@ -1608,7 +1609,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
struct cgroupfs_root *existing_root;
const struct cred *cred;
int i;
struct css_set *cg;
struct css_set *cset;
BUG_ON(sb->s_root != NULL);
@@ -1666,8 +1667,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
/* Link the top cgroup in this hierarchy into all
* the css_set objects */
write_lock(&css_set_lock);
hash_for_each(css_set_table, i, cg, hlist)
link_css_set(&tmp_cg_links, cg, root_cgrp);
hash_for_each(css_set_table, i, cset, hlist)
link_css_set(&tmp_cg_links, cset, root_cgrp);
write_unlock(&css_set_lock);
free_cg_links(&tmp_cg_links);
@@ -1944,10 +1945,11 @@ EXPORT_SYMBOL_GPL(cgroup_taskset_size);
*
* Must be called with cgroup_mutex and threadgroup locked.
*/
static void cgroup_task_migrate(struct cgroup *oldcgrp,
struct task_struct *tsk, struct css_set *newcg)
static void cgroup_task_migrate(struct cgroup *old_cgrp,
struct task_struct *tsk,
struct css_set *new_cset)
{
struct css_set *oldcg;
struct css_set *old_cset;
/*
* We are synchronized through threadgroup_lock() against PF_EXITING
@@ -1955,25 +1957,25 @@ static void cgroup_task_migrate(struct cgroup *oldcgrp,
* css_set to init_css_set and dropping the old one.
*/
WARN_ON_ONCE(tsk->flags & PF_EXITING);
oldcg = tsk->cgroups;
old_cset = tsk->cgroups;
task_lock(tsk);
rcu_assign_pointer(tsk->cgroups, newcg);
rcu_assign_pointer(tsk->cgroups, new_cset);
task_unlock(tsk);
/* Update the css_set linked lists if we're using them */
write_lock(&css_set_lock);
if (!list_empty(&tsk->cg_list))
list_move(&tsk->cg_list, &newcg->tasks);
list_move(&tsk->cg_list, &new_cset->tasks);
write_unlock(&css_set_lock);
/*
* We just gained a reference on oldcg by taking it from the task. As
* trading it for newcg is protected by cgroup_mutex, we're safe to drop
* it here; it will be freed under RCU.
* We just gained a reference on old_cset by taking it from the
* task. As trading it for new_cset is protected by cgroup_mutex,
* we're safe to drop it here; it will be freed under RCU.
*/
set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
put_css_set(oldcg);
set_bit(CGRP_RELEASABLE, &old_cgrp->flags);
put_css_set(old_cset);
}
/**
@@ -2925,7 +2927,7 @@ static void cgroup_advance_iter(struct cgroup *cgrp,
{
struct list_head *l = it->cg_link;
struct cg_cgroup_link *link;
struct css_set *cg;
struct css_set *cset;
/* Advance to the next non-empty css_set */
do {
@@ -2935,10 +2937,10 @@
return;
}
link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
cg = link->cg;
} while (list_empty(&cg->tasks));
cset = link->cg;
} while (list_empty(&cset->tasks));
it->cg_link = l;
it->task = cg->tasks.next;
it->task = cset->tasks.next;
}
/*
@@ -4516,7 +4518,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
struct cgroup_subsys_state *css;
int i, ret;
struct hlist_node *tmp;
struct css_set *cg;
struct css_set *cset;
unsigned long key;
/* check name and function validity */
@@ -4583,17 +4585,17 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
* this is all done under the css_set_lock.
*/
write_lock(&css_set_lock);
hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
hash_for_each_safe(css_set_table, i, tmp, cset, hlist) {
/* skip entries that we already rehashed */
if (cg->subsys[ss->subsys_id])
if (cset->subsys[ss->subsys_id])
continue;
/* remove existing entry */
hash_del(&cg->hlist);
hash_del(&cset->hlist);
/* set new value */
cg->subsys[ss->subsys_id] = css;
cset->subsys[ss->subsys_id] = css;
/* recompute hash and restore entry */
key = css_set_hash(cg->subsys);
hash_add(css_set_table, &cg->hlist, key);
key = css_set_hash(cset->subsys);
hash_add(css_set_table, &cset->hlist, key);
}
write_unlock(&css_set_lock);
@@ -4653,13 +4655,13 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
*/
write_lock(&css_set_lock);
list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
struct css_set *cg = link->cg;
struct css_set *cset = link->cg;
unsigned long key;
hash_del(&cg->hlist);
cg->subsys[ss->subsys_id] = NULL;
key = css_set_hash(cg->subsys);
hash_add(css_set_table, &cg->hlist, key);
hash_del(&cset->hlist);
cset->subsys[ss->subsys_id] = NULL;
key = css_set_hash(cset->subsys);
hash_add(css_set_table, &cset->hlist, key);
}
write_unlock(&css_set_lock);
@@ -5006,7 +5008,7 @@ void cgroup_post_fork(struct task_struct *child)
*/
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
struct css_set *cg;
struct css_set *cset;
int i;
/*
@@ -5023,7 +5025,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
/* Reassign the task to the init_css_set. */
task_lock(tsk);
cg = tsk->cgroups;
cset = tsk->cgroups;
tsk->cgroups = &init_css_set;
if (run_callbacks && need_forkexit_callback) {
@@ -5036,7 +5038,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
if (ss->exit) {
struct cgroup *old_cgrp =
rcu_dereference_raw(cg->subsys[i])->cgroup;
rcu_dereference_raw(cset->subsys[i])->cgroup;
struct cgroup *cgrp = task_cgroup(tsk, i);
ss->exit(cgrp, old_cgrp, tsk);
}
@@ -5044,7 +5046,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
}
task_unlock(tsk);
put_css_set_taskexit(cg);
put_css_set_taskexit(cset);
}
static void check_for_release(struct cgroup *cgrp)
@@ -5453,12 +5455,12 @@ static int current_css_set_cg_links_read(struct cgroup *cont,
struct seq_file *seq)
{
struct cg_cgroup_link *link;
struct css_set *cg;
struct css_set *cset;
read_lock(&css_set_lock);
rcu_read_lock();
cg = rcu_dereference(current->cgroups);
list_for_each_entry(link, &cg->cg_links, cg_link_list) {
cset = rcu_dereference(current->cgroups);
list_for_each_entry(link, &cset->cg_links, cg_link_list) {
struct cgroup *c = link->cgrp;
const char *name;
@@ -5483,11 +5485,11 @@ static int cgroup_css_links_read(struct cgroup *cont,
read_lock(&css_set_lock);
list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
struct css_set *cg = link->cg;
struct css_set *cset = link->cg;
struct task_struct *task;
int count = 0;
seq_printf(seq, "css_set %p\n", cg);
list_for_each_entry(task, &cg->tasks, cg_list) {
seq_printf(seq, "css_set %p\n", cset);
list_for_each_entry(task, &cset->tasks, cg_list) {
if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
seq_puts(seq, " ...\n");
break;