cgroups: make cftype.unregister_event() void-returning
Since we are unable to handle an error returned by cftype.unregister_event() properly, let's make the callback void-returning.

mem_cgroup_unregister_event() has been rewritten to be a "never fail" function. In mem_cgroup_usage_register_event() we save the old buffer for the thresholds array and reuse it in mem_cgroup_usage_unregister_event() to avoid allocation.

Signed-off-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Phil Carmody <ext-phil.2.carmody@nokia.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Paul Menage <menage@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: ac39cf8cb8
Commit: 907860ed38
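To make the approach described above concrete, here is a minimal userspace sketch of the same pattern (an editor's illustration, not the kernel code): the register path does all the allocating and stashes the array it replaces as a spare buffer, sized for the worst case of the next removal, so the unregister path only copies and swaps pointers and therefore has no failure to report. All names in the sketch (struct thresholds, active, spare, register_threshold, unregister_threshold) are hypothetical.

/*
 * Illustrative sketch of "preallocate on register so unregister never
 * fails"; names are hypothetical and not taken from the kernel.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct thresholds {
	size_t size;
	unsigned long entries[];	/* flexible array member */
};

static struct thresholds *active;	/* current array, like memcg->thresholds */
static struct thresholds *spare;	/* preallocated, like memcg->__thresholds */

/*
 * Register may fail: it allocates the grown array.  The array it
 * replaces is kept as the spare; it can hold active->size - 1 entries,
 * which is the most the next unregister can need.
 */
static int register_threshold(unsigned long value)
{
	struct thresholds *old = active;
	size_t n = old ? old->size : 0;
	struct thresholds *grown;

	grown = malloc(sizeof(*grown) + (n + 1) * sizeof(grown->entries[0]));
	if (!grown)
		return -1;
	if (old)
		memcpy(grown->entries, old->entries, n * sizeof(grown->entries[0]));
	grown->entries[n] = value;
	grown->size = n + 1;

	active = grown;
	free(spare);		/* old spare may be too small now, drop it */
	spare = old;		/* the replaced array becomes the new spare */
	return 0;
}

/*
 * Unregister cannot fail: it only writes into the spare buffer that
 * register_threshold() already set aside.  Assumes the caller only
 * unregisters values it registered, mirroring the cgroup core.
 */
static void unregister_threshold(unsigned long value)
{
	struct thresholds *old = active;
	struct thresholds *shrunk = spare;
	size_t i, j, remaining = 0;

	if (!old)
		return;

	/* First pass: count surviving entries. */
	for (i = 0; i < old->size; i++)
		if (old->entries[i] != value)
			remaining++;

	if (!remaining) {
		/* No thresholds left: no array is needed at all. */
		free(shrunk);
		shrunk = NULL;
	} else {
		/* Second pass: copy survivors into the preallocated buffer. */
		shrunk->size = remaining;
		for (i = 0, j = 0; i < old->size; i++)
			if (old->entries[i] != value)
				shrunk->entries[j++] = old->entries[i];
	}

	/* Swap buffers: the replaced array becomes the spare for next time. */
	active = shrunk;
	spare = old;
}

int main(void)
{
	register_threshold(10);
	register_threshold(20);
	unregister_threshold(10);
	printf("%zu threshold(s) left\n", active ? active->size : 0);
	return 0;
}

The cost of this scheme is the memory held by the spare buffer between events; the commit accepts that in exchange for an unregister path with no error code to propagate.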
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -397,7 +397,7 @@ struct cftype {
 	 * This callback must be implemented, if you want provide
 	 * notification functionality.
 	 */
-	int (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
+	void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
 		struct eventfd_ctx *eventfd);
 };
 
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2994,7 +2994,6 @@ static void cgroup_event_remove(struct work_struct *work)
 			remove);
 	struct cgroup *cgrp = event->cgrp;
 
-	/* TODO: check return code */
 	event->cft->unregister_event(cgrp, event->cft, event->eventfd);
 
 	eventfd_ctx_put(event->eventfd);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -226,9 +226,19 @@ struct mem_cgroup {
 	/* thresholds for memory usage. RCU-protected */
 	struct mem_cgroup_threshold_ary *thresholds;
 
+	/*
+	 * Preallocated buffer to be used in mem_cgroup_unregister_event()
+	 * to make it "never fail".
+	 * It must be able to store at least thresholds->size - 1 entries.
+	 */
+	struct mem_cgroup_threshold_ary *__thresholds;
+
 	/* thresholds for mem+swap usage. RCU-protected */
 	struct mem_cgroup_threshold_ary *memsw_thresholds;
 
+	/* the same as __thresholds, but for memsw_thresholds */
+	struct mem_cgroup_threshold_ary *__memsw_thresholds;
+
 	/* For oom notifier event fd */
 	struct list_head oom_notify;
 
@@ -3604,17 +3614,27 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
 	else
 		rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
 
-	/* To be sure that nobody uses thresholds before freeing it */
+	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
 
-	kfree(thresholds);
+	/*
+	 * Free old preallocated buffer and use thresholds as new
+	 * preallocated buffer.
+	 */
+	if (type == _MEM) {
+		kfree(memcg->__thresholds);
+		memcg->__thresholds = thresholds;
+	} else {
+		kfree(memcg->__memsw_thresholds);
+		memcg->__memsw_thresholds = thresholds;
+	}
 unlock:
 	mutex_unlock(&memcg->thresholds_lock);
 
 	return ret;
 }
 
-static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
+static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	struct cftype *cft, struct eventfd_ctx *eventfd)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
@@ -3622,7 +3642,7 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	int type = MEMFILE_TYPE(cft->private);
 	u64 usage;
 	int size = 0;
-	int i, j, ret = 0;
+	int i, j;
 
 	mutex_lock(&memcg->thresholds_lock);
 	if (type == _MEM)
@@ -3649,20 +3669,19 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 			size++;
 	}
 
+	/* Use preallocated buffer for new array of thresholds */
+	if (type == _MEM)
+		thresholds_new = memcg->__thresholds;
+	else
+		thresholds_new = memcg->__memsw_thresholds;
+
 	/* Set thresholds array to NULL if we don't have thresholds */
 	if (!size) {
+		kfree(thresholds_new);
 		thresholds_new = NULL;
-		goto assign;
+		goto swap_buffers;
 	}
 
-	/* Allocate memory for new array of thresholds */
-	thresholds_new = kmalloc(sizeof(*thresholds_new) +
-			size * sizeof(struct mem_cgroup_threshold),
-			GFP_KERNEL);
-	if (!thresholds_new) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
 	thresholds_new->size = size;
 
 	/* Copy thresholds and find current threshold */
@@ -3683,20 +3702,20 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 		j++;
 	}
 
-assign:
-	if (type == _MEM)
+swap_buffers:
+	/* Swap thresholds array and preallocated buffer */
+	if (type == _MEM) {
+		memcg->__thresholds = thresholds;
 		rcu_assign_pointer(memcg->thresholds, thresholds_new);
-	else
+	} else {
+		memcg->__memsw_thresholds = thresholds;
 		rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
+	}
 
-	/* To be sure that nobody uses thresholds before freeing it */
+	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
 
-	kfree(thresholds);
-unlock:
 	mutex_unlock(&memcg->thresholds_lock);
-
-	return ret;
 }
 
 static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
@@ -3724,7 +3743,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
 	return 0;
 }
 
-static int mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
+static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
 	struct cftype *cft, struct eventfd_ctx *eventfd)
 {
 	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
@@ -3743,8 +3762,6 @@ static int mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
 	}
 
 	mutex_unlock(&memcg_oom_mutex);
-
-	return 0;
 }
 
 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,