cgroup: remove cgroup_pidlist->rwsem
cgroup_pidlist locking is needlessly complicated. It has an outer cgroup->pidlist_mutex to protect the list of pidlists associated with a cgroup, and then each pidlist has an rwsem to synchronize updates and reads. Given that the only read access is from seq_file operations, which are always invoked back-to-back, the rwsem is massive overkill. All it does is add unnecessary complexity.

This patch removes cgroup_pidlist->rwsem and protects all accesses to the pidlists belonging to a cgroup with cgroup->pidlist_mutex. pidlist->rwsem locking is removed where it is nested inside cgroup->pidlist_mutex; otherwise, it is replaced with cgroup->pidlist_mutex locking.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Parent: e6b817103d
Commit: 069df3b7ae
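To make the resulting locking scheme concrete, here is a hedged, self-contained userspace sketch (plain C with pthreads, not kernel code; every name in it is illustrative rather than taken from the kernel sources). It models what the patch converges on: a single outer mutex, standing in for cgroup->pidlist_mutex, covers both updating a pidlist and reading it back, and the per-pidlist rwsem disappears because the only readers are seq_file-style callbacks that run back-to-back under that same lock.

/*
 * Hypothetical userspace analogy of the post-patch locking scheme.
 * One mutex (standing in for cgroup->pidlist_mutex) protects both the
 * pidlist structure and its contents; no inner rwsem is needed.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pidlist {
	int *pids;
	int length;
	int use_count;
};

static pthread_mutex_t pidlist_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct pidlist the_list;		/* protected by pidlist_mutex */

/* writer side: loosely analogous to pidlist_array_load() after the patch */
static void pidlist_store(int *array, int length)
{
	pthread_mutex_lock(&pidlist_mutex);
	free(the_list.pids);		/* no inner down_write() any more */
	the_list.pids = array;
	the_list.length = length;
	the_list.use_count++;
	pthread_mutex_unlock(&pidlist_mutex);
}

/* reader side: loosely analogous to cgroup_pidlist_start()/_stop() */
static void pidlist_dump(void)
{
	pthread_mutex_lock(&pidlist_mutex);	/* "start": was down_read(&l->rwsem) */
	for (int i = 0; i < the_list.length; i++)
		printf("%d\n", the_list.pids[i]);
	pthread_mutex_unlock(&pidlist_mutex);	/* "stop": was up_read() */
}

int main(void)
{
	int *arr = malloc(3 * sizeof(int));

	arr[0] = 1; arr[1] = 42; arr[2] = 4096;
	pidlist_store(arr, 3);
	pidlist_dump();
	free(the_list.pids);
	return 0;
}

The trade-off is that readers now serialize against writers on the mutex, but since pidlist reads were already confined to short, back-to-back seq_file calls, there is no real concurrency being given up.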
@@ -3462,8 +3462,6 @@ struct cgroup_pidlist {
 	struct list_head links;
 	/* pointer to the cgroup we belong to, for list removal purposes */
 	struct cgroup *owner;
-	/* protects the other fields */
-	struct rw_semaphore rwsem;
 	/* for delayed destruction */
 	struct delayed_work destroy_dwork;
 };
@@ -3522,7 +3520,6 @@ static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
 	struct cgroup_pidlist *tofree = NULL;
 
 	mutex_lock(&l->owner->pidlist_mutex);
-	down_write(&l->rwsem);
 
 	/*
 	 * Destroy iff we didn't race with a new user or get queued again.
@@ -3535,7 +3532,6 @@ static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
 		tofree = l;
 	}
 
-	up_write(&l->rwsem);
 	mutex_unlock(&l->owner->pidlist_mutex);
 	kfree(tofree);
 }
@@ -3612,7 +3608,6 @@ static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
 	if (!l)
 		return l;
 
-	init_rwsem(&l->rwsem);
 	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
 	l->key.type = type;
 	/* don't need task_nsproxy() if we're looking at ourself */
@@ -3675,12 +3670,10 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 	}
 
 	/* store array, freeing old if necessary */
-	down_write(&l->rwsem);
 	pidlist_free(l->list);
 	l->list = array;
 	l->length = length;
 	l->use_count++;
-	up_write(&l->rwsem);
 
 	mutex_unlock(&cgrp->pidlist_mutex);
 
@@ -3762,7 +3755,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
 	int index = 0, pid = *pos;
 	int *iter;
 
-	down_read(&l->rwsem);
+	mutex_lock(&of->cgrp->pidlist_mutex);
 	if (pid) {
 		int end = l->length;
 
@@ -3790,7 +3783,7 @@ static void cgroup_pidlist_stop(struct seq_file *s, void *v)
 {
 	struct cgroup_pidlist_open_file *of = s->private;
 
-	up_read(&of->pidlist->rwsem);
+	mutex_unlock(&of->cgrp->pidlist_mutex);
 }
 
 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
@@ -3830,13 +3823,13 @@ static const struct seq_operations cgroup_pidlist_seq_operations = {
 
 static void cgroup_release_pid_array(struct cgroup_pidlist *l)
 {
-	down_write(&l->rwsem);
+	mutex_lock(&l->owner->pidlist_mutex);
 	BUG_ON(!l->use_count);
 	/* if the last user, arm the destroy work */
 	if (!--l->use_count)
 		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
 				 CGROUP_PIDLIST_DESTROY_DELAY);
-	up_write(&l->rwsem);
+	mutex_unlock(&l->owner->pidlist_mutex);
 }
 
 static int cgroup_pidlist_release(struct inode *inode, struct file *file)