Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Constify function scope static struct sched_param usage
  sched: Fix strncmp operation
  sched: Move sched_autogroup_exit() to free_signal_struct()
  sched: Fix struct autogroup memory leak
  sched: Mark autogroup_init() __init
  sched: Consolidate the name of root_task_group and init_task_group
Commit e744070fd4
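Note: four hunks in this merge (kernel/irq/manage.c, kernel/kthread.c, kernel/softirq.c, kernel/trace/trace_selftest.c) apply the same one-word change. sched_setscheduler() takes a const struct sched_param * and never writes through it, so a function-scope static parameter block can be marked const and placed in read-only data. A minimal sketch of the pattern follows; set_max_rt_prio() is a hypothetical helper for illustration, not part of this merge.

	#include <linux/sched.h>

	/*
	 * Hypothetical helper (illustration only, not from this merge).
	 * sched_setscheduler() never modifies the param block, so the
	 * function-scope static can be const and live in .rodata.
	 */
	static int set_max_rt_prio(struct task_struct *p)
	{
		static const struct sched_param param = {
			.sched_priority = MAX_RT_PRIO - 1,
		};

		return sched_setscheduler(p, SCHED_FIFO, &param);
	}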
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2511,7 +2511,7 @@ extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_CGROUP_SCHED
 
-extern struct task_group init_task_group;
+extern struct task_group root_task_group;
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -169,15 +169,14 @@ EXPORT_SYMBOL(free_task);
 static inline void free_signal_struct(struct signal_struct *sig)
 {
 	taskstats_tgid_free(sig);
+	sched_autogroup_exit(sig);
 	kmem_cache_free(signal_cachep, sig);
 }
 
 static inline void put_signal_struct(struct signal_struct *sig)
 {
-	if (atomic_dec_and_test(&sig->sigcnt)) {
-		sched_autogroup_exit(sig);
+	if (atomic_dec_and_test(&sig->sigcnt))
 		free_signal_struct(sig);
-	}
 }
 
 void __put_task_struct(struct task_struct *tsk)
@@ -1318,7 +1317,7 @@ bad_fork_cleanup_mm:
 	}
 bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
-		put_signal_struct(p->signal);
+		free_signal_struct(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
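Note: the kernel/fork.c change above is "sched: Move sched_autogroup_exit() to free_signal_struct()". With the autogroup release on the free path itself, every way a signal_struct dies drops its autogroup reference exactly once, and the fork-failure path can call free_signal_struct() directly: a half-constructed signal_struct was never visible to other tasks, so no reference-counted put is needed there. A userspace analogue of the resulting pattern (a sketch, not kernel code):

	#include <stdatomic.h>
	#include <stdlib.h>

	/*
	 * Illustrative analogue: all teardown lives in the free routine,
	 * so the refcounted drop (obj_put) and the never-published error
	 * path (calling obj_free directly) each release the auxiliary
	 * reference exactly once.
	 */
	struct obj {
		atomic_int refs;
		void *aux;	/* stands in for the autogroup reference */
	};

	static void obj_free(struct obj *o)
	{
		free(o->aux);	/* analogue of sched_autogroup_exit(sig) */
		free(o);	/* analogue of kmem_cache_free(signal_cachep, sig) */
	}

	static void obj_put(struct obj *o)
	{
		if (atomic_fetch_sub(&o->refs, 1) == 1)
			obj_free(o);
	}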
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -577,7 +577,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  */
 static int irq_thread(void *data)
 {
-	static struct sched_param param = {
+	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
 	struct irqaction *action = data;
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -148,7 +148,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 	wait_for_completion(&create.done);
 
 	if (!IS_ERR(create.result)) {
-		static struct sched_param param = { .sched_priority = 0 };
+		static const struct sched_param param = { .sched_priority = 0 };
 		va_list args;
 
 		va_start(args, namefmt);
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -278,14 +278,12 @@ struct task_group {
 #endif
 };
 
-#define root_task_group init_task_group
-
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
+# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
 
 /*
  * A weight of 0 or 1 can cause arithmetics problems.
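Note: this and the following kernel/sched.c hunks implement "sched: Consolidate the name of root_task_group and init_task_group": the #define root_task_group init_task_group alias is dropped, and init_task_group is renamed to root_task_group at every use (declaration, load constant, comments, cgroup and autogroup call sites).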
@@ -298,13 +296,13 @@ static DEFINE_SPINLOCK(task_group_lock);
 #define MIN_SHARES	2
 #define MAX_SHARES	(1UL << 18)
 
-static int init_task_group_load = INIT_TASK_GROUP_LOAD;
+static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
 
 /* Default task group.
 *	Every task in system belong to this group at bootup.
 */
-struct task_group init_task_group;
+struct task_group root_task_group;
 
 #endif	/* CONFIG_CGROUP_SCHED */
 
@@ -743,7 +741,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	buf[cnt] = 0;
 	cmp = strstrip(buf);
 
-	if (strncmp(buf, "NO_", 3) == 0) {
+	if (strncmp(cmp, "NO_", 3) == 0) {
 		neg = 1;
 		cmp += 3;
 	}
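Note: the strncmp fix above ("sched: Fix strncmp operation") works because strstrip() trims trailing whitespace and returns a pointer past any leading whitespace; the "NO_" prefix must therefore be tested on the returned pointer cmp, not on the raw buf. A standalone sketch of the failure mode, with strspn() standing in for the kernel's strstrip():

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[] = "  NO_FEATURE";
		/* crude stand-in for strstrip(): skip leading blanks */
		char *cmp = buf + strspn(buf, " \t");

		/* prints 0 then 1: only the cmp test sees the prefix */
		printf("%d\n", strncmp(buf, "NO_", 3) == 0);
		printf("%d\n", strncmp(cmp, "NO_", 3) == 0);
		return 0;
	}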
@@ -7848,7 +7846,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	cfs_rq->tg = tg;
 
 	tg->se[cpu] = se;
-	/* se could be NULL for init_task_group */
+	/* se could be NULL for root_task_group */
 	if (!se)
 		return;
 
@@ -7908,18 +7906,18 @@ void __init sched_init(void)
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.se = (struct sched_entity **)ptr;
+		root_task_group.se = (struct sched_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
+		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
-		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
+		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-		init_task_group.rt_rq = (struct rt_rq **)ptr;
+		root_task_group.rt_rq = (struct rt_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
@@ -7939,13 +7937,13 @@ void __init sched_init(void)
 			global_rt_period(), global_rt_runtime());
 
 #ifdef CONFIG_RT_GROUP_SCHED
-	init_rt_bandwidth(&init_task_group.rt_bandwidth,
+	init_rt_bandwidth(&root_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
-	list_add(&init_task_group.list, &task_groups);
-	INIT_LIST_HEAD(&init_task_group.children);
+	list_add(&root_task_group.list, &task_groups);
+	INIT_LIST_HEAD(&root_task_group.children);
 	autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
 
@@ -7960,34 +7958,34 @@ void __init sched_init(void)
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.shares = init_task_group_load;
+		root_task_group.shares = root_task_group_load;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		/*
-		 * How much cpu bandwidth does init_task_group get?
+		 * How much cpu bandwidth does root_task_group get?
 		 *
 		 * In case of task-groups formed thr' the cgroup filesystem, it
 		 * gets 100% of the cpu resources in the system. This overall
 		 * system cpu resource is divided among the tasks of
-		 * init_task_group and its child task-groups in a fair manner,
+		 * root_task_group and its child task-groups in a fair manner,
 		 * based on each entity's (task or task-group's) weight
 		 * (se->load.weight).
 		 *
-		 * In other words, if init_task_group has 10 tasks of weight
+		 * In other words, if root_task_group has 10 tasks of weight
 		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
 		 * then A0's share of the cpu resource is:
 		 *
 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 		 *
-		 * We achieve this by letting init_task_group's tasks sit
-		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+		 * We achieve this by letting root_task_group's tasks sit
+		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
 		 */
-		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
+		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
-		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL);
+		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -8379,6 +8377,7 @@ static void free_sched_group(struct task_group *tg)
 {
 	free_fair_sched_group(tg);
 	free_rt_sched_group(tg);
+	autogroup_free(tg);
 	kfree(tg);
 }
 
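Note: the autogroup_free(tg) line above is the whole of "sched: Fix struct autogroup memory leak": task groups created for autogroups are destroyed through free_sched_group(), and without this call the struct autogroup allocated in autogroup_create() leaked each time its backing task group was torn down.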
@@ -8812,7 +8811,7 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 
 	if (!cgrp->parent) {
 		/* This is early initialization for the top cgroup */
-		return &init_task_group.css;
+		return &root_task_group.css;
 	}
 
 	parent = cgroup_tg(cgrp->parent);
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -9,10 +9,10 @@ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
 static struct autogroup autogroup_default;
 static atomic_t autogroup_seq_nr;
 
-static void autogroup_init(struct task_struct *init_task)
+static void __init autogroup_init(struct task_struct *init_task)
 {
-	autogroup_default.tg = &init_task_group;
-	init_task_group.autogroup = &autogroup_default;
+	autogroup_default.tg = &root_task_group;
+	root_task_group.autogroup = &autogroup_default;
 	kref_init(&autogroup_default.kref);
 	init_rwsem(&autogroup_default.lock);
 	init_task->signal->autogroup = &autogroup_default;
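Note: autogroup_init() is called exactly once, from sched_init() during early boot (see the CONFIG_CGROUP_SCHED hunk in kernel/sched.c above), so marking it __init lets the kernel discard the function's text once boot is done.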
@@ -63,7 +63,7 @@ static inline struct autogroup *autogroup_create(void)
 	if (!ag)
 		goto out_fail;
 
-	tg = sched_create_group(&init_task_group);
+	tg = sched_create_group(&root_task_group);
 
 	if (IS_ERR(tg))
 		goto out_free;
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -853,7 +853,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 				  cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN: {
-		static struct sched_param param = {
+		static const struct sched_param param = {
 			.sched_priority = MAX_RT_PRIO-1
 		};
 
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -558,7 +558,7 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
 static int trace_wakeup_test_thread(void *data)
 {
 	/* Make this a RT thread, doesn't need to be too high */
-	static struct sched_param param = { .sched_priority = 5 };
+	static const struct sched_param param = { .sched_priority = 5 };
 	struct completion *x = data;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);