freezer: kill unused set_freezable_with_signal()
There's no in-kernel user of set_freezable_with_signal() left. Mixing TIF_SIGPENDING with kernel threads can lead to nasty corner cases as kernel threads never travel the signal delivery path on their own. e.g. the current implementation is buggy in the cancelation path of __thaw_task(). It calls recalc_sigpending_and_wake() in an attempt to clear TIF_SIGPENDING but the function never clears it regardless of sigpending state. This means that signallable freezable kthreads may continue executing with !freezing() && stuck TIF_SIGPENDING, which can be troublesome.

This patch removes set_freezable_with_signal() along with PF_FREEZER_NOSIG and recalc_sigpending*() calls in freezer. User tasks get TIF_SIGPENDING, kernel tasks get woken up and the spurious sigpending is dealt with in the usual signal delivery path.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Parent: adfa543e73
Commit: 34b087e483
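For context, a freezable kernel thread opts in with set_freezable() and polls the freezer at its sleep points; after this change that is the only pattern for kthreads, since they are no longer sent fake signals. Below is a minimal sketch of such a thread, not taken from this commit: the names my_kthread_fn, my_wq and my_work_ready are hypothetical.

	/* Illustrative only; my_kthread_fn, my_wq and my_work_ready are made up. */
	#include <linux/freezer.h>
	#include <linux/kthread.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
	static bool my_work_ready;		/* set by whoever queues work */

	static int my_kthread_fn(void *data)
	{
		/* Opt in to freezing; kthreads stay PF_NOFREEZE until they do this. */
		set_freezable();

		while (!kthread_should_stop()) {
			/*
			 * wait_event_freezable() wraps an interruptible wait with
			 * try_to_freeze(), so the thread enters the refrigerator
			 * during suspend without being sent a fake signal (the
			 * mechanism this commit removes for kthreads).
			 */
			wait_event_freezable(my_wq,
					     my_work_ready || kthread_should_stop());

			my_work_ready = false;
			/* ... process the work ... */
		}
		return 0;
	}

Any out-of-tree user of set_freezable_with_signal() would have to switch to a freezable wait like the one above, since the fake-signal notification path no longer exists for kthreads.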
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -49,7 +49,7 @@ static inline bool try_to_freeze(void)
 }
 
 extern bool freeze_task(struct task_struct *p);
-extern bool __set_freezable(bool with_signal);
+extern bool set_freezable(void);
 
 #ifdef CONFIG_CGROUP_FREEZER
 extern bool cgroup_freezing(struct task_struct *task);
@@ -104,23 +104,6 @@ static inline int freezer_should_skip(struct task_struct *p)
 	return !!(p->flags & PF_FREEZER_SKIP);
 }
 
-/*
- * Tell the freezer that the current task should be frozen by it
- */
-static inline bool set_freezable(void)
-{
-	return __set_freezable(false);
-}
-
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline bool set_freezable_with_signal(void)
-{
-	return __set_freezable(true);
-}
-
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
  * wait_event_killable() and wait_event_interruptible_timeout(), originally
@@ -176,7 +159,6 @@ static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
 
 #define wait_event_freezable(wq, condition)				\
 		wait_event_interruptible(wq, condition)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1788,7 +1788,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -39,7 +39,7 @@ bool freezing_slow_path(struct task_struct *p)
 	if (pm_nosig_freezing || cgroup_freezing(p))
 		return true;
 
-	if (pm_freezing && !(p->flags & PF_FREEZER_NOSIG))
+	if (pm_freezing && !(p->flags & PF_KTHREAD))
 		return true;
 
 	return false;
@@ -72,10 +72,6 @@ bool __refrigerator(bool check_kthr_stop)
 		schedule();
 	}
 
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending(); /* We sent fake signal, clean it up */
-	spin_unlock_irq(&current->sighand->siglock);
-
 	pr_debug("%s left refrigerator\n", current->comm);
 
 	/*
@@ -120,7 +116,7 @@ bool freeze_task(struct task_struct *p)
 		return false;
 	}
 
-	if (!(p->flags & PF_FREEZER_NOSIG)) {
+	if (!(p->flags & PF_KTHREAD)) {
 		fake_signal_wake_up(p);
 		/*
 		 * fake_signal_wake_up() goes through p's scheduler
@@ -145,28 +141,19 @@ void __thaw_task(struct task_struct *p)
 	 * be visible to @p as waking up implies wmb. Waking up inside
 	 * freezer_lock also prevents wakeups from leaking outside
 	 * refrigerator.
-	 *
-	 * If !FROZEN, @p hasn't reached refrigerator, recalc sigpending to
-	 * avoid leaving dangling TIF_SIGPENDING behind.
 	 */
 	spin_lock_irqsave(&freezer_lock, flags);
-	if (frozen(p)) {
+	if (frozen(p))
 		wake_up_process(p);
-	} else {
-		spin_lock(&p->sighand->siglock);
-		recalc_sigpending_and_wake(p);
-		spin_unlock(&p->sighand->siglock);
-	}
 	spin_unlock_irqrestore(&freezer_lock, flags);
 }
 
 /**
- * __set_freezable - make %current freezable
- * @with_signal: do we want %TIF_SIGPENDING for notification too?
+ * set_freezable - make %current freezable
  *
  * Mark %current freezable and enter refrigerator if necessary.
  */
-bool __set_freezable(bool with_signal)
+bool set_freezable(void)
 {
 	might_sleep();
 
@@ -177,10 +164,8 @@ bool __set_freezable(bool with_signal)
 	 */
 	spin_lock_irq(&freezer_lock);
 	current->flags &= ~PF_NOFREEZE;
-	if (with_signal)
-		current->flags &= ~PF_FREEZER_NOSIG;
 	spin_unlock_irq(&freezer_lock);
 
 	return try_to_freeze();
 }
-EXPORT_SYMBOL(__set_freezable);
+EXPORT_SYMBOL(set_freezable);
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -282,7 +282,7 @@ int kthreadd(void *unused)
 	set_cpus_allowed_ptr(tsk, cpu_all_mask);
 	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 
-	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+	current->flags |= PF_NOFREEZE;
 
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);