Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / Freezer: Revert 27920651fe "PM / Freezer: Make fake_signal_wake_up() wake TASK_KILLABLE tasks too"
  PM / Freezer: Reimplement wait_event_freezekillable using freezer_do_not_count/freezer_count
  USB: Update last_busy time after autosuspend fails
  PM / Runtime: Automatically retry failed autosuspends
  PM / QoS: Remove redundant check
  PM / OPP: Fix build when CONFIG_PM_OPP is not set
  PM / Runtime: Fix runtime accounting calculation error
  PM / Sleep: Update freezer documentation
  PM / Sleep: Remove unused symbol 'suspend_cpu_hotplug'
  PM / Sleep: Fix race between CPU hotplug and freezer
  ACPI / PM: Add Sony VPCEB17FX to nonvs blacklist
Linus Torvalds 2011-11-05 17:54:18 -07:00
Parent 06d8eb1b7d d6cc76856d
Commit 8110efc64c
11 changed files: 125 additions and 27 deletions

Documentation/power/freezing-of-tasks.txt

@@ -22,12 +22,12 @@ try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
 either wakes them up, if they are kernel threads, or sends fake signals to them,
 if they are user space processes. A task that has TIF_FREEZE set, should react
 to it by calling the function called refrigerator() (defined in
-kernel/power/process.c), which sets the task's PF_FROZEN flag, changes its state
+kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
 to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
 Then, we say that the task is 'frozen' and therefore the set of functions
 handling this mechanism is referred to as 'the freezer' (these functions are
-defined in kernel/power/process.c and include/linux/freezer.h). User space
-processes are generally frozen before kernel threads.
+defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
+User space processes are generally frozen before kernel threads.
 
 It is not recommended to call refrigerator() directly. Instead, it is
 recommended to use the try_to_freeze() function (defined in
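
For illustration, a minimal sketch of a freezable kernel thread following the
pattern described above; my_thread_fn() and my_do_work() are invented names,
not part of these patches:

	#include <linux/kthread.h>
	#include <linux/freezer.h>

	static int my_thread_fn(void *data)
	{
		set_freezable();	/* opt this kthread in to freezing */

		while (!kthread_should_stop()) {
			try_to_freeze();	/* enter the refrigerator if asked */
			my_do_work();		/* hypothetical payload */
		}
		return 0;
	}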
@@ -95,7 +95,7 @@ after the memory for the image has been freed, we don't want tasks to allocate
 additional memory and we prevent them from doing that by freezing them earlier.
 [Of course, this also means that device drivers should not allocate substantial
 amounts of memory from their .suspend() callbacks before hibernation, but this
-is e separate issue.]
+is a separate issue.]
 
 3. The third reason is to prevent user space processes and some kernel threads
 from interfering with the suspending and resuming of devices. A user space

Documentation/power/runtime_pm.txt

@@ -789,6 +789,16 @@ will behave normally, not taking the autosuspend delay into account.
 Similarly, if the power.use_autosuspend field isn't set then the autosuspend
 helper functions will behave just like the non-autosuspend counterparts.
 
+Under some circumstances a driver or subsystem may want to prevent a device
+from autosuspending immediately, even though the usage counter is zero and the
+autosuspend delay time has expired. If the ->runtime_suspend() callback
+returns -EAGAIN or -EBUSY, and if the next autosuspend delay expiration time is
+in the future (as it normally would be if the callback invoked
+pm_runtime_mark_last_busy()), the PM core will automatically reschedule the
+autosuspend. The ->runtime_suspend() callback can't do this rescheduling
+itself because no suspend requests of any kind are accepted while the device is
+suspending (i.e., while the callback is running).
+
 The implementation is well suited for asynchronous use in interrupt contexts.
 However such use inevitably involves races, because the PM core can't
 synchronize ->runtime_suspend() callbacks with the arrival of I/O requests.
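
For illustration, a sketch of a ->runtime_suspend() callback that declines an
autosuspend in the way described above; the foo_* names are invented helpers,
not part of these patches:

	#include <linux/pm_runtime.h>

	static int foo_runtime_suspend(struct device *dev)
	{
		struct foo_chip *chip = dev_get_drvdata(dev);

		if (foo_chip_busy(chip)) {
			/* Push the next expiration time into the future and
			 * decline; the PM core reschedules the autosuspend. */
			pm_runtime_mark_last_busy(dev);
			return -EBUSY;
		}

		foo_chip_power_down(chip);	/* hypothetical helper */
		return 0;
	}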

drivers/acpi/sleep.c

@@ -398,6 +398,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 	},
 	{
 	.callback = init_nvs_nosave,
+	.ident = "Sony Vaio VPCEB17FX",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
+		},
+	},
+	{
+	.callback = init_nvs_nosave,
 	.ident = "Sony Vaio VGN-SR11M",
 	.matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),

drivers/base/power/runtime.c

@@ -29,13 +29,10 @@ static int rpm_suspend(struct device *dev, int rpmflags);
 void update_pm_runtime_accounting(struct device *dev)
 {
 	unsigned long now = jiffies;
-	int delta;
+	unsigned long delta;
 
 	delta = now - dev->power.accounting_timestamp;
-	if (delta < 0)
-		delta = 0;
 
 	dev->power.accounting_timestamp = now;
 
 	if (dev->power.disable_depth > 0)
@@ -296,6 +293,9 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
  * the callback was running then carry it out, otherwise send an idle
  * notification for its parent (if the suspend succeeded and both
  * ignore_children of parent->power and irq_safe of dev->power are not set).
+ * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
+ * flag is set and the next autosuspend-delay expiration time is in the
+ * future, schedule another autosuspend attempt.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
@@ -416,10 +416,21 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	if (retval) {
 		__update_runtime_status(dev, RPM_ACTIVE);
 		dev->power.deferred_resume = false;
-		if (retval == -EAGAIN || retval == -EBUSY)
+		if (retval == -EAGAIN || retval == -EBUSY) {
 			dev->power.runtime_error = 0;
-		else
+
+			/*
+			 * If the callback routine failed an autosuspend, and
+			 * if the last_busy time has been updated so that there
+			 * is a new autosuspend expiration time, automatically
+			 * reschedule another autosuspend.
+			 */
+			if ((rpmflags & RPM_AUTO) &&
+			    pm_runtime_autosuspend_expiration(dev) != 0)
+				goto repeat;
+		} else {
 			pm_runtime_cancel_pending(dev);
+		}
 		wake_up_all(&dev->power.wait_queue);
 		goto out;
 	}
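
Note that the retry above is attempted only for RPM_AUTO requests whose next
expiration time is in the future. As a reminder, a driver typically opts in to
autosuspend at probe time roughly like this (the 2000 ms delay is an arbitrary
example value, not taken from these patches):

	pm_runtime_set_autosuspend_delay(dev, 2000);	/* milliseconds */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);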

drivers/usb/core/driver.c

@@ -1667,6 +1667,11 @@ int usb_runtime_suspend(struct device *dev)
 		return -EAGAIN;
 
 	status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+
+	/* Allow a retry if autosuspend failed temporarily */
+	if (status == -EAGAIN || status == -EBUSY)
+		usb_mark_last_busy(udev);
+
 	/* The PM core reacts badly unless the return code is 0,
 	 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
 	 */
include/linux/cpu.h

@@ -196,13 +196,9 @@ static inline void cpu_hotplug_driver_unlock(void)
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM_SLEEP_SMP
-extern int suspend_cpu_hotplug;
-
 extern int disable_nonboot_cpus(void);
 extern void enable_nonboot_cpus(void);
 #else /* !CONFIG_PM_SLEEP_SMP */
-#define suspend_cpu_hotplug	0
-
 static inline int disable_nonboot_cpus(void) { return 0; }
 static inline void enable_nonboot_cpus(void) {}
 #endif /* !CONFIG_PM_SLEEP_SMP */

include/linux/freezer.h

@@ -143,14 +143,9 @@ static inline void set_freezable_with_signal(void)
 #define wait_event_freezekillable(wq, condition)		\
 ({								\
 	int __retval;						\
-	do {							\
-		__retval = wait_event_killable(wq,		\
-				(condition) || freezing(current)); \
-		if (__retval && !freezing(current))		\
-			break;					\
-		else if (!(condition))				\
-			__retval = -ERESTARTSYS;		\
-	} while (try_to_freeze());				\
+	freezer_do_not_count();					\
+	__retval = wait_event_killable(wq, (condition));	\
+	freezer_count();					\
 	__retval;						\
 })
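
freezer_do_not_count() tells the freezer it need not wait for the sleeping
task, and freezer_count() re-exposes the task and calls try_to_freeze() on the
way out, so a long killable sleep no longer blocks a freeze. A hypothetical
caller sketch (wq, req and resp_ready() are invented names):

	int rc;

	rc = wait_event_freezekillable(wq, resp_ready(req));
	if (rc)		/* -ERESTARTSYS: a fatal signal arrived */
		return rc;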

include/linux/opp.h

@@ -97,11 +97,11 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
 	return 0;
 }
 
-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
 {
 	return ERR_PTR(-EINVAL);
 }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_OPP */
 
 #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
 int opp_init_cpufreq_table(struct device *dev,
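
The static inline annotation above matters because the stub lives in a header:
without it, every translation unit including <linux/opp.h> with CONFIG_PM_OPP
unset would emit its own external definition of opp_get_notifier() and the
link would fail. A hypothetical caller, which works with both the stub and the
real implementation:

	static int foo_register_opp_notifier(struct device *dev,
					     struct notifier_block *nb)
	{
		struct srcu_notifier_head *nh = opp_get_notifier(dev);

		if (IS_ERR(nh))		/* the stub returns ERR_PTR(-EINVAL) */
			return PTR_ERR(nh);
		return srcu_notifier_chain_register(nh, nb);
	}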

kernel/cpu.c

@@ -15,6 +15,7 @@
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 #include <linux/gfp.h>
+#include <linux/suspend.h>
 
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
@@ -476,6 +477,79 @@ static int alloc_frozen_cpus(void)
 	return 0;
 }
 core_initcall(alloc_frozen_cpus);
+
+/*
+ * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
+ * hotplug when tasks are about to be frozen. Also, don't allow the freezer
+ * to continue until any currently running CPU hotplug operation gets
+ * completed.
+ *
+ * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
+ * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
+ * CPU hotplug path and released only after it is complete. Thus, we
+ * (and hence the freezer) will block here until any currently running CPU
+ * hotplug operation gets completed.
+ */
+void cpu_hotplug_disable_before_freeze(void)
+{
+	cpu_maps_update_begin();
+	cpu_hotplug_disabled = 1;
+	cpu_maps_update_done();
+}
+
+/*
+ * When tasks have been thawed, re-enable regular CPU hotplug (which had been
+ * disabled while beginning to freeze tasks).
+ */
+void cpu_hotplug_enable_after_thaw(void)
+{
+	cpu_maps_update_begin();
+	cpu_hotplug_disabled = 0;
+	cpu_maps_update_done();
+}
+
+/*
+ * When callbacks for CPU hotplug notifications are being executed, we must
+ * ensure that the state of the system with respect to the tasks being frozen
+ * or not, as reported by the notification, remains unchanged *throughout the
+ * duration* of the execution of the callbacks.
+ * Hence we need to prevent the freezer from racing with regular CPU hotplug.
+ *
+ * This synchronization is implemented by mutually excluding regular CPU
+ * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
+ * Hibernate notifications.
+ */
+static int
+cpu_hotplug_pm_callback(struct notifier_block *nb,
+			unsigned long action, void *ptr)
+{
+	switch (action) {
+
+	case PM_SUSPEND_PREPARE:
+	case PM_HIBERNATION_PREPARE:
+		cpu_hotplug_disable_before_freeze();
+		break;
+
+	case PM_POST_SUSPEND:
+	case PM_POST_HIBERNATION:
+		cpu_hotplug_enable_after_thaw();
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+int cpu_hotplug_pm_sync_init(void)
+{
+	pm_notifier(cpu_hotplug_pm_callback, 0);
+	return 0;
+}
+core_initcall(cpu_hotplug_pm_sync_init);
+
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 /**

kernel/power/process.c

@@ -67,7 +67,7 @@ static void fake_signal_wake_up(struct task_struct *p)
 	unsigned long flags;
 
 	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 1);
+	signal_wake_up(p, 0);
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
 }

kernel/power/qos.c

@@ -386,8 +386,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
 		pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
 		filp->private_data = req;
 
-		if (filp->private_data)
-			return 0;
+		return 0;
 	}
 	return -EPERM;
 }