Merge branch 'pm-sleep' into pm-for-linus
* pm-sleep: (51 commits)
  PM: Drop generic_subsys_pm_ops
  PM / Sleep: Remove forward-only callbacks from AMBA bus type
  PM / Sleep: Remove forward-only callbacks from platform bus type
  PM: Run the driver callback directly if the subsystem one is not there
  PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
  PM / Sleep: Merge internal functions in generic_ops.c
  PM / Sleep: Simplify generic system suspend callbacks
  PM / Hibernate: Remove deprecated hibernation snapshot ioctls
  PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()
  PM / Sleep: Recommend [un]lock_system_sleep() over using pm_mutex directly
  PM / Sleep: Replace mutex_[un]lock(&pm_mutex) with [un]lock_system_sleep()
  PM / Sleep: Make [un]lock_system_sleep() generic
  PM / Sleep: Use the freezer_count() functions in [un]lock_system_sleep() APIs
  PM / Freezer: Remove the "userspace only" constraint from freezer[_do_not]_count()
  PM / Hibernate: Replace unintuitive 'if' condition in kernel/power/user.c with 'else'
  Freezer / sunrpc / NFS: don't allow TASK_KILLABLE sleeps to block the freezer
  PM / Sleep: Unify diagnostic messages from device suspend/resume
  ACPI / PM: Do not save/restore NVS on Asus K54C/K54HR
  PM / Hibernate: Remove deprecated hibernation test modes
  PM / Hibernate: Thaw processes in SNAPSHOT_CREATE_IMAGE ioctl test path
  ...

Conflicts:
	kernel/kmod.c
commit b7ba68c4a0
@@ -85,17 +85,6 @@ Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
 
 ---------------------------
 
-What:	Deprecated snapshot ioctls
-When:	2.6.36
-
-Why:	The ioctls in kernel/power/user.c were marked as deprecated long time
-	ago. Now they notify users about that so that they need to replace
-	their userspace. After some more time, remove them completely.
-
-Who:	Jiri Slaby <jirislaby@gmail.com>
-
----------------------------
-
 What:	The ieee80211_regdom module parameter
 When:	March 2010 / desktop catchup
@@ -126,7 +126,9 @@ The core methods to suspend and resume devices reside in struct dev_pm_ops
 pointed to by the ops member of struct dev_pm_domain, or by the pm member of
 struct bus_type, struct device_type and struct class.  They are mostly of
 interest to the people writing infrastructure for platforms and buses, like PCI
-or USB, or device type and device class drivers.
+or USB, or device type and device class drivers.  They also are relevant to the
+writers of device drivers whose subsystems (PM domains, device types, device
+classes and bus types) don't provide all power management methods.
 
 Bus drivers implement these methods as appropriate for the hardware and the
 drivers using it; PCI works differently from USB, and so on.  Not many people
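
To make the passage above concrete, a subsystem exposes these methods by pointing its pm member at a struct dev_pm_ops instance; here is a minimal sketch (all foo_* names are hypothetical, not taken from the patches):

    #include <linux/device.h>
    #include <linux/pm.h>

    static int foo_bus_suspend(struct device *dev);  /* system-sleep entry points */
    static int foo_bus_resume(struct device *dev);

    static const struct dev_pm_ops foo_bus_pm_ops = {
            .suspend = foo_bus_suspend,
            .resume  = foo_bus_resume,
    };

    static struct bus_type foo_bus_type = {
            .name = "foo",
            .pm   = &foo_bus_pm_ops,  /* the 'pm member of struct bus_type' */
    };
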
@@ -268,32 +270,35 @@ various phases always run after tasks have been frozen and before they are
 unfrozen.  Furthermore, the *_noirq phases run at a time when IRQ handlers have
 been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
 
-All phases use PM domain, bus, type, or class callbacks (that is, methods
-defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
-These callbacks are regarded by the PM core as mutually exclusive.  Moreover,
-PM domain callbacks always take precedence over bus, type and class callbacks,
-while type callbacks take precedence over bus and class callbacks, and class
-callbacks take precedence over bus callbacks.  To be precise, the following
-rules are used to determine which callback to execute in the given phase:
+All phases use PM domain, bus, type, class or driver callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, dev->class->pm or
+dev->driver->pm).  These callbacks are regarded by the PM core as mutually
+exclusive.  Moreover, PM domain callbacks always take precedence over all of the
+other callbacks and, for example, type callbacks take precedence over bus, class
+and driver callbacks.  To be precise, the following rules are used to determine
+which callback to execute in the given phase:
 
-  1. If dev->pm_domain is present, the PM core will attempt to execute the
-     callback included in dev->pm_domain->ops.  If that callback is not
-     present, no action will be carried out for the given device.
+  1. If dev->pm_domain is present, the PM core will choose the callback
+     included in dev->pm_domain->ops for execution
 
   2. Otherwise, if both dev->type and dev->type->pm are present, the callback
-     included in dev->type->pm will be executed.
+     included in dev->type->pm will be chosen for execution.
 
   3. Otherwise, if both dev->class and dev->class->pm are present, the
-     callback included in dev->class->pm will be executed.
+     callback included in dev->class->pm will be chosen for execution.
 
   4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
-     included in dev->bus->pm will be executed.
+     included in dev->bus->pm will be chosen for execution.
 
 This allows PM domains and device types to override callbacks provided by bus
 types or device classes if necessary.
 
-These callbacks may in turn invoke device- or driver-specific methods stored in
-dev->driver->pm, but they don't have to.
+The PM domain, type, class and bus callbacks may in turn invoke device- or
+driver-specific methods stored in dev->driver->pm, but they don't have to do
+that.
+
+If the subsystem callback chosen for execution is not present, the PM core will
+execute the corresponding method from dev->driver->pm instead if there is one.
 
 
 Entering System Suspend
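
The numbered rules above translate into a straightforward ordered check. A rough sketch of the selection (not the kernel's actual pm_op() code, which this series reworks to return callback pointers):

    /* returns the subsystem-level ops to use for dev, or NULL if none */
    static const struct dev_pm_ops *subsys_pm_ops(struct device *dev)
    {
            if (dev->pm_domain)
                    return &dev->pm_domain->ops;    /* rule 1: PM domain */
            if (dev->type && dev->type->pm)
                    return dev->type->pm;           /* rule 2: device type */
            if (dev->class && dev->class->pm)
                    return dev->class->pm;          /* rule 3: device class */
            if (dev->bus && dev->bus->pm)
                    return dev->bus->pm;            /* rule 4: bus type */
            return NULL;    /* fall back to dev->driver->pm, per the paragraph above */
    }
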
@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called.  It executes
 try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
 either wakes them up, if they are kernel threads, or sends fake signals to them,
 if they are user space processes.  A task that has TIF_FREEZE set, should react
-to it by calling the function called refrigerator() (defined in
+to it by calling the function called __refrigerator() (defined in
 kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
 to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
 Then, we say that the task is 'frozen' and therefore the set of functions
@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
 defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
 User space processes are generally frozen before kernel threads.
 
-It is not recommended to call refrigerator() directly.  Instead, it is
-recommended to use the try_to_freeze() function (defined in
-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
-task enter refrigerator() if the flag is set.
+__refrigerator() must not be called directly.  Instead, use the
+try_to_freeze() function (defined in include/linux/freezer.h), that checks
+the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
+flag is set.
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
@@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
 
 After the system memory state has been restored from a hibernation image and
 devices have been reinitialized, the function thaw_processes() is called in
 order to clear the PF_FROZEN flag for each frozen task.  Then, the tasks that
-have been frozen leave refrigerator() and continue running.
+have been frozen leave __refrigerator() and continue running.
 
 III. Which kernel threads are freezable?
 
 Kernel threads are not freezable by default.  However, a kernel thread may clear
 PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
-directly is strongly discouraged).  From this point it is regarded as freezable
+directly is not allowed).  From this point it is regarded as freezable
 and must call try_to_freeze() in a suitable place.
 
 IV. Why do we do that?
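
Putting sections II and III together, a freezable kernel thread ends up looking roughly like this (a sketch; foo_do_work() is a placeholder):

    #include <linux/freezer.h>
    #include <linux/kthread.h>

    static void foo_do_work(void);      /* placeholder for real work */

    static int foo_thread_fn(void *data)
    {
            set_freezable();            /* clear PF_NOFREEZE: opt in to the freezer */

            while (!kthread_should_stop()) {
                    try_to_freeze();    /* park in __refrigerator() if TIF_FREEZE is set */
                    foo_do_work();
            }
            return 0;
    }
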
@@ -176,3 +176,28 @@ tasks, since it generally exists anyway.
 A driver must have all firmwares it may need in RAM before suspend() is called.
 If keeping them is not practical, for example due to their size, they must be
 requested early enough using the suspend notifier API described in notifiers.txt.
+
+VI. Are there any precautions to be taken to prevent freezing failures?
+
+Yes, there are.
+
+First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
+from system-wide sleep such as suspend/hibernation is not encouraged.
+If possible, that piece of code must instead hook onto the suspend/hibernation
+notifiers to achieve mutual exclusion.  Look at the CPU-Hotplug code
+(kernel/cpu.c) for an example.
+
+However, if that is not feasible, and grabbing 'pm_mutex' is deemed necessary,
+it is strongly discouraged to directly call mutex_[un]lock(&pm_mutex) since
+that could lead to freezing failures, because if the suspend/hibernate code
+successfully acquired the 'pm_mutex' lock, and hence that other entity failed
+to acquire the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE
+state.  As a consequence, the freezer would not be able to freeze that task,
+leading to freezing failure.
+
+However, the [un]lock_system_sleep() APIs are safe to use in this scenario,
+since they ask the freezer to skip freezing this task, since it is anyway
+"frozen enough" as it is blocked on 'pm_mutex', which will be released
+only after the entire suspend/hibernation sequence is complete.
+So, to summarize, use [un]lock_system_sleep() instead of directly using
+mutex_[un]lock(&pm_mutex).  That would prevent freezing failures.
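
For reference, after this series the [un]lock_system_sleep() helpers pair pm_mutex with the freezer_count() machinery along roughly these lines (simplified from include/linux/suspend.h of this era):

    static inline void lock_system_sleep(void)
    {
            freezer_do_not_count();   /* freezer will treat this task as frozen */
            mutex_lock(&pm_mutex);
    }

    static inline void unlock_system_sleep(void)
    {
            mutex_unlock(&pm_mutex);
            freezer_count();          /* re-join the freezer; freeze now if pending */
    }
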
@@ -57,6 +57,10 @@ the following:
 
   4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
 
+If the subsystem chosen by applying the above rules doesn't provide the relevant
+callback, the PM core will invoke the corresponding driver callback stored in
+dev->driver->pm directly (if present).
+
 The PM core always checks which callback to use in the order given above, so the
 priority order of callbacks from high to low is: PM domain, device type, class
 and bus type.  Moreover, the high-priority one will always take precedence over
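
The fallback added above means the core effectively picks a runtime-suspend method like this (an illustrative sketch only, reusing the subsys_pm_ops() helper sketched earlier):

    static int rpm_invoke_suspend(struct device *dev)
    {
            const struct dev_pm_ops *ops = subsys_pm_ops(dev);
            int (*cb)(struct device *) = ops ? ops->runtime_suspend : NULL;

            /* new in this series: fall back to the driver's own method */
            if (!cb && dev->driver && dev->driver->pm)
                    cb = dev->driver->pm->runtime_suspend;

            return cb ? cb(dev) : 0;
    }
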
@@ -64,86 +68,88 @@ a low-priority one.  The PM domain, bus type, device type and class callbacks
 are referred to as subsystem-level callbacks in what follows.
 
 By default, the callbacks are always invoked in process context with interrupts
-enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
-->runtime_idle() callbacks may be invoked in atomic context with interrupts
-disabled for a given device.  This implies that the callback routines in
-question must not block or sleep, but it also means that the synchronous helper
-functions listed at the end of Section 4 may be used for that device within an
-interrupt handler or generally in an atomic context.
+enabled.  However, the pm_runtime_irq_safe() helper function can be used to tell
+the PM core that it is safe to run the ->runtime_suspend(), ->runtime_resume()
+and ->runtime_idle() callbacks for the given device in atomic context with
+interrupts disabled.  This implies that the callback routines in question must
+not block or sleep, but it also means that the synchronous helper functions
+listed at the end of Section 4 may be used for that device within an interrupt
+handler or generally in an atomic context.
 
-The subsystem-level suspend callback is _entirely_ _responsible_ for handling
-the suspend of the device as appropriate, which may, but need not include
-executing the device driver's own ->runtime_suspend() callback (from the
+The subsystem-level suspend callback, if present, is _entirely_ _responsible_
+for handling the suspend of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_suspend() callback (from the
 PM core's point of view it is not necessary to implement a ->runtime_suspend()
 callback in a device driver as long as the subsystem-level suspend callback
 knows what to do to handle the device).
 
-  * Once the subsystem-level suspend callback has completed successfully
-    for given device, the PM core regards the device as suspended, which need
-    not mean that the device has been put into a low power state.  It is
-    supposed to mean, however, that the device will not process data and will
-    not communicate with the CPU(s) and RAM until the subsystem-level resume
-    callback is executed for it.  The runtime PM status of a device after
-    successful execution of the subsystem-level suspend callback is 'suspended'.
+  * Once the subsystem-level suspend callback (or the driver suspend callback,
+    if invoked directly) has completed successfully for the given device, the PM
+    core regards the device as suspended, which need not mean that it has been
+    put into a low power state.  It is supposed to mean, however, that the
+    device will not process data and will not communicate with the CPU(s) and
+    RAM until the appropriate resume callback is executed for it.  The runtime
+    PM status of a device after successful execution of the suspend callback is
+    'suspended'.
 
-  * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
-    the device's runtime PM status is 'active', which means that the device
-    _must_ be fully operational afterwards.
+  * If the suspend callback returns -EBUSY or -EAGAIN, the device's runtime PM
+    status remains 'active', which means that the device _must_ be fully
+    operational afterwards.
 
-  * If the subsystem-level suspend callback returns an error code different
-    from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will
-    refuse to run the helper functions described in Section 4 for the device,
-    until the status of it is directly set either to 'active', or to 'suspended'
-    (the PM core provides special helper functions for this purpose).
+  * If the suspend callback returns an error code different from -EBUSY and
+    -EAGAIN, the PM core regards this as a fatal error and will refuse to run
+    the helper functions described in Section 4 for the device until its status
+    is directly set to either 'active', or 'suspended' (the PM core provides
+    special helper functions for this purpose).
 
-In particular, if the driver requires remote wake-up capability (i.e. hardware
+In particular, if the driver requires remote wakeup capability (i.e. hardware
 mechanism allowing the device to request a change of its power state, such as
 PCI PME) for proper functioning and device_run_wake() returns 'false' for the
 device, then ->runtime_suspend() should return -EBUSY.  On the other hand, if
-device_run_wake() returns 'true' for the device and the device is put into a low
-power state during the execution of the subsystem-level suspend callback, it is
-expected that remote wake-up will be enabled for the device.  Generally, remote
-wake-up should be enabled for all input devices put into a low power state at
-run time.
+device_run_wake() returns 'true' for the device and the device is put into a
+low-power state during the execution of the suspend callback, it is expected
+that remote wakeup will be enabled for the device.  Generally, remote wakeup
+should be enabled for all input devices put into low-power states at run time.
 
-The subsystem-level resume callback is _entirely_ _responsible_ for handling the
-resume of the device as appropriate, which may, but need not include executing
-the device driver's own ->runtime_resume() callback (from the PM core's point of
-view it is not necessary to implement a ->runtime_resume() callback in a device
-driver as long as the subsystem-level resume callback knows what to do to handle
-the device).
+The subsystem-level resume callback, if present, is _entirely_ _responsible_ for
+handling the resume of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_resume() callback (from the
+PM core's point of view it is not necessary to implement a ->runtime_resume()
+callback in a device driver as long as the subsystem-level resume callback knows
+what to do to handle the device).
 
-  * Once the subsystem-level resume callback has completed successfully, the PM
-    core regards the device as fully operational, which means that the device
-    _must_ be able to complete I/O operations as needed.  The runtime PM status
-    of the device is then 'active'.
+  * Once the subsystem-level resume callback (or the driver resume callback, if
+    invoked directly) has completed successfully, the PM core regards the device
+    as fully operational, which means that the device _must_ be able to complete
+    I/O operations as needed.  The runtime PM status of the device is then
+    'active'.
 
-  * If the subsystem-level resume callback returns an error code, the PM core
-    regards this as a fatal error and will refuse to run the helper functions
-    described in Section 4 for the device, until its status is directly set
-    either to 'active' or to 'suspended' (the PM core provides special helper
-    functions for this purpose).
+  * If the resume callback returns an error code, the PM core regards this as a
+    fatal error and will refuse to run the helper functions described in Section
+    4 for the device, until its status is directly set to either 'active', or
+    'suspended' (by means of special helper functions provided by the PM core
+    for this purpose).
 
-The subsystem-level idle callback is executed by the PM core whenever the device
-appears to be idle, which is indicated to the PM core by two counters, the
-device's usage counter and the counter of 'active' children of the device.
+The idle callback (a subsystem-level one, if present, or the driver one) is
+executed by the PM core whenever the device appears to be idle, which is
+indicated to the PM core by two counters, the device's usage counter and the
+counter of 'active' children of the device.
 
   * If any of these counters is decreased using a helper function provided by
     the PM core and it turns out to be equal to zero, the other counter is
-    checked.  If that counter also is equal to zero, the PM core executes the
-    subsystem-level idle callback with the device as an argument.
+    checked.  If that counter also is equal to zero, the PM core executes the
+    idle callback with the device as its argument.
 
-The action performed by a subsystem-level idle callback is totally dependent on
-the subsystem in question, but the expected and recommended action is to check
+The action performed by the idle callback is totally dependent on the subsystem
+(or driver) in question, but the expected and recommended action is to check
 if the device can be suspended (i.e. if all of the conditions necessary for
 suspending the device are satisfied) and to queue up a suspend request for the
 device in that case.  The value returned by this callback is ignored by the PM
 core.
 
 The helper functions provided by the PM core, described in Section 4, guarantee
-that the following constraints are met with respect to the bus type's runtime
-PM callbacks:
+that the following constraints are met with respect to runtime PM callbacks for
+one device:
 
 (1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
     ->runtime_suspend() in parallel with ->runtime_resume() or with another
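
From a driver writer's point of view, supplying callbacks that the PM core may now invoke directly looks roughly like this (a platform driver is used as an arbitrary example; all foo_* names are hypothetical):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int foo_runtime_suspend(struct device *dev)
    {
            /* quiesce the hardware; fail only with -EBUSY/-EAGAIN if the
             * device must stay fully operational */
            return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
            /* bring the hardware back to a fully operational state */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm   = &foo_pm_ops,
            },
    };
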
@@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
 #define TIF_MEMDIE 13 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
-#define TIF_FREEZE 16 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return. */
 #define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
@@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_POLLING_NRFLAG 16
 #define TIF_USING_IWMMXT 17
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19
 #define TIF_RESTORE_SIGMASK 20
 #define TIF_SECCOMP 21
 
@@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
 
@@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
 #define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
-#define TIF_FREEZE 29
 #define TIF_DEBUG 30 /* debugging enabled */
 #define TIF_USERSPACE 31 /* true if FS sets userspace */
 
@@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
 
@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
 			TIF_NEED_RESCHED */
 #define TIF_MEMDIE 4 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
-#define TIF_FREEZE 6 /* is freezing for suspend */
 #define TIF_IRQ_SYNC 7 /* sync pipeline stage */
 #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
 #define TIF_SINGLESTEP 9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 #define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
@@ -86,7 +86,6 @@ struct thread_info {
 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -94,7 +93,6 @@ struct thread_info {
 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
@@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
@@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 4 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
 #define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
-#define TIF_FREEZE 16 /* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 
@@ -113,7 +113,6 @@ struct thread_info {
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
 #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
 #define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
-#define TIF_FREEZE 20 /* is freezing for suspend */
 #define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
@@ -126,7 +125,6 @@ struct thread_info {
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
 #define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE)
 
 /* "work to do on user-return" bits */
@@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)
 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU (1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
@@ -103,7 +103,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_DELAYED_TRACE 14 /* single step a syscall */
 #define TIF_SYSCALL_TRACE 15 /* syscall trace active */
 #define TIF_MEMDIE 16 /* is terminating due to OOM killer */
-#define TIF_FREEZE 17 /* thread is freezing for suspend */
 #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
 
 #endif /* _ASM_M68K_THREAD_INFO_H */
@@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 6 /* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
 #define TIF_SECCOMP 10 /* secure computing */
-#define TIF_FREEZE 14 /* Freezing for suspend */
 
 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_POLLING_NRFLAG 16
@@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
 #define _TIF_IRET (1 << TIF_IRET)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
 
@@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19
 #define TIF_FIXADE 20 /* Fix address errors in software */
 #define TIF_LOGADE 21 /* Log address errors to syslog */
 #define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */
@@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU (1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 #define _TIF_FIXADE (1<<TIF_FIXADE)
 #define _TIF_LOGADE (1<<TIF_LOGADE)
 #define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
@@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE +(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME +(1 << TIF_NOTIFY_RESUME)
@@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);
 #define _TIF_SINGLESTEP +(1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK +(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG +(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE +(1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
@@ -58,7 +58,6 @@ struct thread_info {
 #define TIF_32BIT 4 /* 32 bit binary */
 #define TIF_MEMDIE 5 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
-#define TIF_FREEZE 7 /* is freezing for suspend */
 #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
 #define TIF_SINGLESTEP 9 /* single stepping? */
 #define TIF_BLOCKSTEP 10 /* branch stepping? */
@@ -69,7 +68,6 @@ struct thread_info {
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_32BIT (1 << TIF_32BIT)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
@@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR 12 /* Force successful syscall return */
 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
-#define TIF_FREEZE 14 /* Freezing for suspend */
 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
 #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
 
@@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
 #define _TIF_NOERROR (1<<TIF_NOERROR)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
@@ -1406,7 +1406,6 @@ static struct bus_type vio_bus_type = {
 	.match = vio_bus_match,
 	.probe = vio_bus_probe,
 	.remove = vio_bus_remove,
-	.pm = GENERIC_SUBSYS_PM_OPS,
 };
 
 /**
@@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
 #define TIF_SINGLE_STEP 20 /* This task is single stepped */
-#define TIF_FREEZE 21 /* thread is freezing for suspend */
 
 #define _TIF_SYSCALL (1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT (1<<TIF_31BIT)
 #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #ifdef CONFIG_64BIT
 #define is_32bit_task() (test_thread_flag(TIF_31BIT))
@@ -122,7 +122,6 @@ extern void init_thread_xstate(void);
 #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* Freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -133,7 +132,6 @@ extern void init_thread_xstate(void);
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 /*
  * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
@@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
 				* TIF_NEED_RESCHED */
 #define TIF_MEMDIE 10 /* is terminating due to OOM killer */
-#define TIF_FREEZE 11 /* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
 				_TIF_SIGPENDING | \
 				_TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #endif /* __KERNEL__ */
 
@@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 /* flag bit 12 is available */
 #define TIF_MEMDIE 13 /* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG 14
-#define TIF_FREEZE 15 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
 				_TIF_DO_NOTIFY_RESUME_MASK | \
@@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 5 /* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT 6
 #define TIF_RESTORE_SIGMASK 7
-#define TIF_FREEZE 16 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE (1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 #endif
@@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
 #define TIF_SYSCALL_TRACE 8
 #define TIF_MEMDIE 18
-#define TIF_FREEZE 19
 #define TIF_RESTORE_SIGMASK 20
 
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 
 /*
@@ -90,7 +90,6 @@ struct thread_info {
 #define TIF_MEMDIE 20 /* is terminating due to OOM killer */
 #define TIF_DEBUG 21 /* uses debug registers */
 #define TIF_IO_BITMAP 22 /* uses I/O bitmap */
-#define TIF_FREEZE 23 /* is freezing for suspend */
 #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
@@ -112,7 +111,6 @@ struct thread_info {
 #define _TIF_FORK (1 << TIF_FORK)
 #define _TIF_DEBUG (1 << TIF_DEBUG)
 #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||||
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
|
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
|
||||||
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
|
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
|
||||||
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||||
#define TIF_FREEZE 17 /* is freezing for suspend */
|
|
||||||
|
|
||||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||||
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
|
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
|
||||||
|
@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||||
#define _TIF_IRET (1<<TIF_IRET)
|
#define _TIF_IRET (1<<TIF_IRET)
|
||||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
|
||||||
|
|
||||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||||
|
|
|
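The hunks above drop the per-architecture TIF_FREEZE thread flags: freeze requests are now tracked by the freezer core itself, so a kernel thread only needs the generic helpers and no arch-specific flag bit. A minimal freezable kthread loop after this change looks roughly like the sketch below (example_thread is a hypothetical name; the helpers are the freezer API of this kernel era):

	/* Sketch: a freezable kthread loop that no longer needs TIF_FREEZE. */
	static int example_thread(void *data)
	{
		set_freezable();	/* opt in to the freezer */

		while (!kthread_should_stop()) {
			try_to_freeze();	/* park here during suspend/hibernation */

			/* ... do one unit of work, then sleep ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}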
@@ -476,6 +476,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
 		},
 	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Asus K54C",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+		},
+	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Asus K54HR",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+		},
+	},
 	{},
 };
 #endif /* CONFIG_SUSPEND */
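The table gains two Asus machines whose firmware corrupts the ACPI NVS region across suspend; init_nvs_nosave makes the kernel skip saving and restoring NVS on them. Adding a quirk for another affected machine follows the same shape; the entry below is a hypothetical sketch (the vendor and product strings must match the machine's DMI data exactly):

	/* Sketch: one more init_nvs_nosave quirk; "Example Vendor" and
	 * "EX123" are made-up placeholders. */
	{
	.callback = init_nvs_nosave,
	.ident = "Example Laptop",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		DMI_MATCH(DMI_PRODUCT_NAME, "EX123"),
		},
	},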
@@ -109,31 +109,7 @@ static int amba_legacy_resume(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_prepare(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (drv && drv->pm && drv->pm->prepare)
-		ret = drv->pm->prepare(dev);
-
-	return ret;
-}
-
-static void amba_pm_complete(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-
-	if (drv && drv->pm && drv->pm->complete)
-		drv->pm->complete(dev);
-}
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define amba_pm_prepare		NULL
-#define amba_pm_complete	NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
 

@@ -155,22 +131,6 @@ static int amba_pm_suspend(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_suspend_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->suspend_noirq)
-			ret = drv->pm->suspend_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;

@@ -189,28 +149,10 @@ static int amba_pm_resume(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_resume_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->resume_noirq)
-			ret = drv->pm->resume_noirq(dev);
-	}
-
-	return ret;
-}
-
 #else /* !CONFIG_SUSPEND */
 
 #define amba_pm_suspend		NULL
 #define amba_pm_resume		NULL
-#define amba_pm_suspend_noirq	NULL
-#define amba_pm_resume_noirq	NULL
 
 #endif /* !CONFIG_SUSPEND */
 

@@ -234,22 +176,6 @@ static int amba_pm_freeze(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_freeze_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->freeze_noirq)
-			ret = drv->pm->freeze_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_thaw(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;

@@ -268,22 +194,6 @@ static int amba_pm_thaw(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_thaw_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->thaw_noirq)
-			ret = drv->pm->thaw_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_poweroff(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;

@@ -302,22 +212,6 @@ static int amba_pm_poweroff(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_poweroff_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->poweroff_noirq)
-			ret = drv->pm->poweroff_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_restore(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;

@@ -336,32 +230,12 @@ static int amba_pm_restore(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_restore_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->restore_noirq)
-			ret = drv->pm->restore_noirq(dev);
-	}
-
-	return ret;
-}
-
 #else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define amba_pm_freeze		NULL
 #define amba_pm_thaw		NULL
 #define amba_pm_poweroff	NULL
 #define amba_pm_restore		NULL
-#define amba_pm_freeze_noirq	NULL
-#define amba_pm_thaw_noirq	NULL
-#define amba_pm_poweroff_noirq	NULL
-#define amba_pm_restore_noirq	NULL
 
 #endif /* !CONFIG_HIBERNATE_CALLBACKS */
 

@@ -402,20 +276,12 @@ static int amba_pm_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM
 
 static const struct dev_pm_ops amba_pm = {
-	.prepare	= amba_pm_prepare,
-	.complete	= amba_pm_complete,
 	.suspend	= amba_pm_suspend,
 	.resume		= amba_pm_resume,
 	.freeze		= amba_pm_freeze,
 	.thaw		= amba_pm_thaw,
 	.poweroff	= amba_pm_poweroff,
 	.restore	= amba_pm_restore,
-	.suspend_noirq	= amba_pm_suspend_noirq,
-	.resume_noirq	= amba_pm_resume_noirq,
-	.freeze_noirq	= amba_pm_freeze_noirq,
-	.thaw_noirq	= amba_pm_thaw_noirq,
-	.poweroff_noirq	= amba_pm_poweroff_noirq,
-	.restore_noirq	= amba_pm_restore_noirq,
 	SET_RUNTIME_PM_OPS(
 		amba_pm_runtime_suspend,
 		amba_pm_runtime_resume,
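Every function removed here only forwarded to the matching callback in the driver's dev_pm_ops; since the PM core now falls back to the driver callback itself, the bus-level wrappers are dead weight. An AMBA driver that needs, say, noirq handling simply provides the callbacks and the core invokes them directly. A hedged sketch (foo_* names are hypothetical):

	/* Sketch: an AMBA driver supplying its own noirq callbacks now that
	 * the bus wrappers are gone. */
	static const struct dev_pm_ops foo_pm_ops = {
		.suspend	= foo_suspend,
		.suspend_noirq	= foo_suspend_noirq,
		.resume_noirq	= foo_resume_noirq,
		.resume		= foo_resume,
	};

	static struct amba_driver foo_driver = {
		.drv = {
			.name	= "foo",
			.pm	= &foo_pm_ops,
		},
		/* .probe, .remove, .id_table ... */
	};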
@@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p,
 		return 0;
 	}
 
+	read_lock_usermodehelper();
+
 	if (WARN_ON(usermodehelper_is_disabled())) {
 		dev_err(device, "firmware: %s will not be loaded\n", name);
 		retval = -EBUSY;

@@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p,
 	fw_destroy_instance(fw_priv);
 
 out:
+	read_unlock_usermodehelper();
+
 	if (retval) {
 		release_firmware(firmware);
 		*firmware_p = NULL;
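Firmware loading now holds the usermodehelper read lock for the whole operation, so the usermodehelper_is_disabled() check and the subsequent helper invocation can no longer race with the suspend path disabling helpers in between. The pairing, as a minimal sketch (example_fw_load is a hypothetical name; the locking functions are the ones this merge introduces):

	/* Sketch of the lock pairing: the disabled check and the helper use
	 * sit under one read lock, closing the race with the suspend-side
	 * writer. */
	static int example_fw_load(struct device *dev, const char *name)
	{
		int retval = 0;

		read_lock_usermodehelper();
		if (usermodehelper_is_disabled()) {
			retval = -EBUSY;
			goto out;
		}
		/* ... kick off the firmware-loading usermode helper ... */
	out:
		read_unlock_usermodehelper();
		return retval;
	}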
@@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev)
 	return ret;
 }
 
-int platform_pm_prepare(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (drv && drv->pm && drv->pm->prepare)
-		ret = drv->pm->prepare(dev);
-
-	return ret;
-}
-
-void platform_pm_complete(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-
-	if (drv && drv->pm && drv->pm->complete)
-		drv->pm->complete(dev);
-}
-
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND

@@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev)
 	return ret;
 }
 
-int platform_pm_suspend_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->suspend_noirq)
-			ret = drv->pm->suspend_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;

@@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev)
 	return ret;
 }
 
-int platform_pm_resume_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->resume_noirq)
-			ret = drv->pm->resume_noirq(dev);
-	}
-
-	return ret;
-}
-
 #endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS

@@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev)
 	return ret;
 }
 
-int platform_pm_freeze_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->freeze_noirq)
-			ret = drv->pm->freeze_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_thaw(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;

@@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev)
 	return ret;
 }
 
-int platform_pm_thaw_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->thaw_noirq)
-			ret = drv->pm->thaw_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_poweroff(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;

@@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev)
 	return ret;
 }
 
-int platform_pm_poweroff_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->poweroff_noirq)
-			ret = drv->pm->poweroff_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_restore(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;

@@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev)
 	return ret;
 }
 
-int platform_pm_restore_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->restore_noirq)
-			ret = drv->pm->restore_noirq(dev);
-	}
-
-	return ret;
-}
-
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
 
 static const struct dev_pm_ops platform_dev_pm_ops = {
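The same cleanup as on the AMBA bus: platform drivers are unaffected, because the PM core now reaches their callbacks without the bus forwarding them. A platform driver that wants prepare/complete handling just declares it in its own dev_pm_ops, roughly like this sketch (bar_* names are hypothetical):

	/* Sketch: a platform driver supplying its own sleep callbacks now
	 * that the bus-level forwarders are gone. */
	static const struct dev_pm_ops bar_pm_ops = {
		.prepare  = bar_prepare,
		.suspend  = bar_suspend,
		.resume   = bar_resume,
		.complete = bar_complete,
	};

	static struct platform_driver bar_driver = {
		.driver = {
			.name = "bar",
			.pm   = &bar_pm_ops,
		},
	};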
@@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
  * @event: PM transition of the system under way.
  * @bool: Whether or not this is the "noirq" stage.
  *
- * If the device has not been suspended at run time, execute the
- * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
- * return its error code.  Otherwise, return zero.
+ * Execute the PM callback corresponding to @event provided by the driver of
+ * @dev, if defined, and return its error code.  Return 0 if the callback is
+ * not present.
  */
 static int __pm_generic_call(struct device *dev, int event, bool noirq)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int (*callback)(struct device *);
 
-	if (!pm || pm_runtime_suspended(dev))
+	if (!pm)
 		return 0;
 
 	switch (event) {

@@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
 	case PM_EVENT_HIBERNATE:
 		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
 		break;
+	case PM_EVENT_RESUME:
+		callback = noirq ? pm->resume_noirq : pm->resume;
+		break;
 	case PM_EVENT_THAW:
 		callback = noirq ? pm->thaw_noirq : pm->thaw;
 		break;
+	case PM_EVENT_RESTORE:
+		callback = noirq ? pm->restore_noirq : pm->restore;
+		break;
 	default:
 		callback = NULL;
 		break;

@@ -210,57 +216,13 @@ int pm_generic_thaw(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw);
 
-/**
- * __pm_generic_resume - Generic resume/restore callback for subsystems.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the resume/resotre callback provided by the @dev's driver, if
- * defined.  If it returns 0, change the device's runtime PM status to 'active'.
- * Return the callback's error code.
- */
-static int __pm_generic_resume(struct device *dev, int event, bool noirq)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int (*callback)(struct device *);
-	int ret;
-
-	if (!pm)
-		return 0;
-
-	switch (event) {
-	case PM_EVENT_RESUME:
-		callback = noirq ? pm->resume_noirq : pm->resume;
-		break;
-	case PM_EVENT_RESTORE:
-		callback = noirq ? pm->restore_noirq : pm->restore;
-		break;
-	default:
-		callback = NULL;
-		break;
-	}
-
-	if (!callback)
-		return 0;
-
-	ret = callback(dev);
-	if (!ret && !noirq && pm_runtime_enabled(dev)) {
-		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
-		pm_runtime_enable(dev);
-	}
-
-	return ret;
-}
-
 /**
  * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
  * @dev: Device to resume.
  */
 int pm_generic_resume_noirq(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+	return __pm_generic_call(dev, PM_EVENT_RESUME, true);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
 

@@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
  */
 int pm_generic_resume(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
+	return __pm_generic_call(dev, PM_EVENT_RESUME, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);
 

@@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
  */
 int pm_generic_restore_noirq(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+	return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
 

@@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
  */
 int pm_generic_restore(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
+	return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
 

@@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev)
 	pm_runtime_idle(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
-
-struct dev_pm_ops generic_subsys_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
-	.prepare = pm_generic_prepare,
-	.suspend = pm_generic_suspend,
-	.suspend_noirq = pm_generic_suspend_noirq,
-	.resume = pm_generic_resume,
-	.resume_noirq = pm_generic_resume_noirq,
-	.freeze = pm_generic_freeze,
-	.freeze_noirq = pm_generic_freeze_noirq,
-	.thaw = pm_generic_thaw,
-	.thaw_noirq = pm_generic_thaw_noirq,
-	.poweroff = pm_generic_poweroff,
-	.poweroff_noirq = pm_generic_poweroff_noirq,
-	.restore = pm_generic_restore,
-	.restore_noirq = pm_generic_restore_noirq,
-	.complete = pm_generic_complete,
-#endif
-#ifdef CONFIG_PM_RUNTIME
-	.runtime_suspend = pm_generic_runtime_suspend,
-	.runtime_resume = pm_generic_runtime_resume,
-	.runtime_idle = pm_generic_runtime_idle,
-#endif
-};
-EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
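With the catch-all generic_subsys_pm_ops gone, a subsystem that wants the generic behaviour cherry-picks the exported pm_generic_* helpers it actually needs. A minimal sketch (my_subsys_pm_ops is a hypothetical name; the helper functions are the real exports from this file):

	/* Sketch: what a subsystem does instead of pointing at the dropped
	 * generic_subsys_pm_ops. */
	static const struct dev_pm_ops my_subsys_pm_ops = {
		.suspend  = pm_generic_suspend,
		.resume   = pm_generic_resume,
		.freeze   = pm_generic_freeze,
		.thaw     = pm_generic_thaw,
		.poweroff = pm_generic_poweroff,
		.restore  = pm_generic_restore,
	};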
@@ -32,6 +32,8 @@
 #include "../base.h"
 #include "power.h"
 
+typedef int (*pm_callback_t)(struct device *);
+
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and

@@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev)
 	ktime_t calltime = ktime_set(0, 0);
 
 	if (initcall_debug) {
-		pr_info("calling  %s+ @ %i\n",
-			dev_name(dev), task_pid_nr(current));
+		pr_info("calling  %s+ @ %i, parent: %s\n",
+			dev_name(dev), task_pid_nr(current),
+			dev->parent ? dev_name(dev->parent) : "none");
 		calltime = ktime_get();
 	}
 

@@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async)
 }
 
 /**
- * pm_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
  * @state: PM transition of the system being carried out.
  */
-static int pm_op(struct device *dev,
-		 const struct dev_pm_ops *ops,
-		 pm_message_t state)
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 {
-	int error = 0;
-	ktime_t calltime;
-
-	calltime = initcall_debug_start(dev);
-
 	switch (state.event) {
 #ifdef CONFIG_SUSPEND
 	case PM_EVENT_SUSPEND:
-		if (ops->suspend) {
-			error = ops->suspend(dev);
-			suspend_report_result(ops->suspend, error);
-		}
-		break;
+		return ops->suspend;
 	case PM_EVENT_RESUME:
-		if (ops->resume) {
-			error = ops->resume(dev);
-			suspend_report_result(ops->resume, error);
-		}
-		break;
+		return ops->resume;
 #endif /* CONFIG_SUSPEND */
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
-		if (ops->freeze) {
-			error = ops->freeze(dev);
-			suspend_report_result(ops->freeze, error);
-		}
-		break;
+		return ops->freeze;
 	case PM_EVENT_HIBERNATE:
-		if (ops->poweroff) {
-			error = ops->poweroff(dev);
-			suspend_report_result(ops->poweroff, error);
-		}
-		break;
+		return ops->poweroff;
 	case PM_EVENT_THAW:
 	case PM_EVENT_RECOVER:
-		if (ops->thaw) {
-			error = ops->thaw(dev);
-			suspend_report_result(ops->thaw, error);
-		}
+		return ops->thaw;
 		break;
 	case PM_EVENT_RESTORE:
-		if (ops->restore) {
-			error = ops->restore(dev);
-			suspend_report_result(ops->restore, error);
-		}
-		break;
+		return ops->restore;
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
-	default:
-		error = -EINVAL;
 	}
 
-	initcall_debug_report(dev, calltime, error);
-
-	return error;
+	return NULL;
 }
 
 /**
- * pm_noirq_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
  * @state: PM transition of the system being carried out.
  *
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int pm_noirq_op(struct device *dev,
-			const struct dev_pm_ops *ops,
-			pm_message_t state)
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 {
-	int error = 0;
-	ktime_t calltime = ktime_set(0, 0), delta, rettime;
-
-	if (initcall_debug) {
-		pr_info("calling  %s+ @ %i, parent: %s\n",
-			dev_name(dev), task_pid_nr(current),
-			dev->parent ? dev_name(dev->parent) : "none");
-		calltime = ktime_get();
-	}
-
 	switch (state.event) {
 #ifdef CONFIG_SUSPEND
 	case PM_EVENT_SUSPEND:
-		if (ops->suspend_noirq) {
-			error = ops->suspend_noirq(dev);
-			suspend_report_result(ops->suspend_noirq, error);
-		}
-		break;
+		return ops->suspend_noirq;
 	case PM_EVENT_RESUME:
-		if (ops->resume_noirq) {
-			error = ops->resume_noirq(dev);
-			suspend_report_result(ops->resume_noirq, error);
-		}
-		break;
+		return ops->resume_noirq;
 #endif /* CONFIG_SUSPEND */
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
-		if (ops->freeze_noirq) {
-			error = ops->freeze_noirq(dev);
-			suspend_report_result(ops->freeze_noirq, error);
-		}
-		break;
+		return ops->freeze_noirq;
 	case PM_EVENT_HIBERNATE:
-		if (ops->poweroff_noirq) {
-			error = ops->poweroff_noirq(dev);
-			suspend_report_result(ops->poweroff_noirq, error);
-		}
-		break;
+		return ops->poweroff_noirq;
 	case PM_EVENT_THAW:
 	case PM_EVENT_RECOVER:
-		if (ops->thaw_noirq) {
-			error = ops->thaw_noirq(dev);
-			suspend_report_result(ops->thaw_noirq, error);
-		}
-		break;
+		return ops->thaw_noirq;
 	case PM_EVENT_RESTORE:
-		if (ops->restore_noirq) {
-			error = ops->restore_noirq(dev);
-			suspend_report_result(ops->restore_noirq, error);
-		}
-		break;
+		return ops->restore_noirq;
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
-	default:
-		error = -EINVAL;
 	}
 
-	if (initcall_debug) {
-		rettime = ktime_get();
-		delta = ktime_sub(rettime, calltime);
-		printk("initcall %s_i+ returned %d after %Ld usecs\n",
-			dev_name(dev), error,
-			(unsigned long long)ktime_to_ns(delta) >> 10);
-	}
-
-	return error;
+	return NULL;
 }
 
 static char *pm_verb(int event)

@@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
 		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 }
 
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+			    pm_message_t state, char *info)
+{
+	ktime_t calltime;
+	int error;
+
+	if (!cb)
+		return 0;
+
+	calltime = initcall_debug_start(dev);
+
+	pm_dev_dbg(dev, state, info);
+	error = cb(dev);
+	suspend_report_result(cb, error);
+
+	initcall_debug_report(dev, calltime, error);
+
+	return error;
+}
+
 /*------------------------- Resume routines -------------------------*/
 
 /**

@@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
  */
 static int device_resume_noirq(struct device *dev, pm_message_t state)
 {
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 	int error = 0;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "EARLY power domain ");
-		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
+		info = "EARLY power domain ";
+		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "EARLY type ");
-		error = pm_noirq_op(dev, dev->type->pm, state);
+		info = "EARLY type ";
+		callback = pm_noirq_op(dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "EARLY class ");
-		error = pm_noirq_op(dev, dev->class->pm, state);
+		info = "EARLY class ";
+		callback = pm_noirq_op(dev->class->pm, state);
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "EARLY ");
-		error = pm_noirq_op(dev, dev->bus->pm, state);
+		info = "EARLY bus ";
+		callback = pm_noirq_op(dev->bus->pm, state);
 	}
 
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "EARLY driver ";
+		callback = pm_noirq_op(dev->driver->pm, state);
+	}
+
+	error = dpm_run_callback(callback, dev, state, info);
+
 	TRACE_RESUME(error);
 	return error;
 }

@@ -485,26 +435,6 @@ void dpm_resume_noirq(pm_message_t state)
 }
 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
 
-/**
- * legacy_resume - Execute a legacy (bus or class) resume callback for device.
- * @dev: Device to resume.
- * @cb: Resume callback to execute.
- */
-static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
-{
-	int error;
-	ktime_t calltime;
-
-	calltime = initcall_debug_start(dev);
-
-	error = cb(dev);
-	suspend_report_result(cb, error);
-
-	initcall_debug_report(dev, calltime, error);
-
-	return error;
-}
-
 /**
  * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.

@@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
  */
 static int device_resume(struct device *dev, pm_message_t state, bool async)
 {
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 	int error = 0;
 	bool put = false;
 

@@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	put = true;
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		error = pm_op(dev, &dev->pm_domain->ops, state);
-		goto End;
+		info = "power domain ";
+		callback = pm_op(&dev->pm_domain->ops, state);
+		goto Driver;
 	}
 
 	if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "type ");
-		error = pm_op(dev, dev->type->pm, state);
-		goto End;
+		info = "type ";
+		callback = pm_op(dev->type->pm, state);
+		goto Driver;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
-			pm_dev_dbg(dev, state, "class ");
-			error = pm_op(dev, dev->class->pm, state);
-			goto End;
+			info = "class ";
+			callback = pm_op(dev->class->pm, state);
+			goto Driver;
 		} else if (dev->class->resume) {
-			pm_dev_dbg(dev, state, "legacy class ");
-			error = legacy_resume(dev, dev->class->resume);
+			info = "legacy class ";
+			callback = dev->class->resume;
 			goto End;
 		}
 	}
 
 	if (dev->bus) {
 		if (dev->bus->pm) {
-			pm_dev_dbg(dev, state, "");
-			error = pm_op(dev, dev->bus->pm, state);
+			info = "bus ";
+			callback = pm_op(dev->bus->pm, state);
 		} else if (dev->bus->resume) {
-			pm_dev_dbg(dev, state, "legacy ");
-			error = legacy_resume(dev, dev->bus->resume);
+			info = "legacy bus ";
+			callback = dev->bus->resume;
+			goto End;
 		}
 	}
 
+ Driver:
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "driver ";
+		callback = pm_op(dev->driver->pm, state);
+	}
+
  End:
+	error = dpm_run_callback(callback, dev, state, info);
 	dev->power.is_suspended = false;
 
  Unlock:

@@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state)
  */
 static void device_complete(struct device *dev, pm_message_t state)
 {
+	void (*callback)(struct device *) = NULL;
+	char *info = NULL;
+
 	device_lock(dev);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "completing power domain ");
-		if (dev->pm_domain->ops.complete)
-			dev->pm_domain->ops.complete(dev);
+		info = "completing power domain ";
+		callback = dev->pm_domain->ops.complete;
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "completing type ");
-		if (dev->type->pm->complete)
-			dev->type->pm->complete(dev);
+		info = "completing type ";
+		callback = dev->type->pm->complete;
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "completing class ");
-		if (dev->class->pm->complete)
-			dev->class->pm->complete(dev);
+		info = "completing class ";
+		callback = dev->class->pm->complete;
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "completing ");
-		if (dev->bus->pm->complete)
-			dev->bus->pm->complete(dev);
+		info = "completing bus ";
+		callback = dev->bus->pm->complete;
+	}
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "completing driver ";
+		callback = dev->driver->pm->complete;
+	}
+
+	if (callback) {
+		pm_dev_dbg(dev, state, info);
+		callback(dev);
 	}
 
 	device_unlock(dev);

@@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state)
  */
 static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
-	int error;
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "LATE power domain ");
-		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
-		if (error)
-			return error;
+		info = "LATE power domain ";
+		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "LATE type ");
-		error = pm_noirq_op(dev, dev->type->pm, state);
-		if (error)
-			return error;
+		info = "LATE type ";
+		callback = pm_noirq_op(dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "LATE class ");
-		error = pm_noirq_op(dev, dev->class->pm, state);
-		if (error)
-			return error;
+		info = "LATE class ";
+		callback = pm_noirq_op(dev->class->pm, state);
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "LATE ");
-		error = pm_noirq_op(dev, dev->bus->pm, state);
-		if (error)
-			return error;
+		info = "LATE bus ";
+		callback = pm_noirq_op(dev->bus->pm, state);
 	}
 
-	return 0;
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "LATE driver ";
+		callback = pm_noirq_op(dev->driver->pm, state);
+	}
+
+	return dpm_run_callback(callback, dev, state, info);
 }
 
 /**

@@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
  */
 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 {
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 	int error = 0;
 
 	dpm_wait_for_children(dev, async);

@@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	device_lock(dev);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		error = pm_op(dev, &dev->pm_domain->ops, state);
-		goto End;
+		info = "power domain ";
+		callback = pm_op(&dev->pm_domain->ops, state);
+		goto Run;
 	}
 
 	if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "type ");
-		error = pm_op(dev, dev->type->pm, state);
-		goto End;
+		info = "type ";
+		callback = pm_op(dev->type->pm, state);
+		goto Run;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
-			pm_dev_dbg(dev, state, "class ");
-			error = pm_op(dev, dev->class->pm, state);
-			goto End;
+			info = "class ";
+			callback = pm_op(dev->class->pm, state);
+			goto Run;
 		} else if (dev->class->suspend) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_suspend(dev, state, dev->class->suspend);

@@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
 	if (dev->bus) {
 		if (dev->bus->pm) {
-			pm_dev_dbg(dev, state, "");
-			error = pm_op(dev, dev->bus->pm, state);
+			info = "bus ";
+			callback = pm_op(dev->bus->pm, state);
 		} else if (dev->bus->suspend) {
-			pm_dev_dbg(dev, state, "legacy ");
+			pm_dev_dbg(dev, state, "legacy bus ");
 			error = legacy_suspend(dev, state, dev->bus->suspend);
+			goto End;
 		}
 	}
 
+ Run:
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "driver ";
+		callback = pm_op(dev->driver->pm, state);
+	}
+
+	error = dpm_run_callback(callback, dev, state, info);
+
  End:
 	if (!error) {
 		dev->power.is_suspended = true;

@@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state)
  */
 static int device_prepare(struct device *dev, pm_message_t state)
 {
+	int (*callback)(struct device *) = NULL;
+	char *info = NULL;
 	int error = 0;
 
 	device_lock(dev);

@@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
 	dev->power.wakeup_path = device_may_wakeup(dev);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "preparing power domain ");
-		if (dev->pm_domain->ops.prepare)
-			error = dev->pm_domain->ops.prepare(dev);
-		suspend_report_result(dev->pm_domain->ops.prepare, error);
-		if (error)
-			goto End;
+		info = "preparing power domain ";
+		callback = dev->pm_domain->ops.prepare;
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "preparing type ");
-		if (dev->type->pm->prepare)
-			error = dev->type->pm->prepare(dev);
-		suspend_report_result(dev->type->pm->prepare, error);
-		if (error)
-			goto End;
+		info = "preparing type ";
+		callback = dev->type->pm->prepare;
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "preparing class ");
-		if (dev->class->pm->prepare)
-			error = dev->class->pm->prepare(dev);
-		suspend_report_result(dev->class->pm->prepare, error);
-		if (error)
-			goto End;
+		info = "preparing class ";
+		callback = dev->class->pm->prepare;
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "preparing ");
-		if (dev->bus->pm->prepare)
-			error = dev->bus->pm->prepare(dev);
-		suspend_report_result(dev->bus->pm->prepare, error);
+		info = "preparing bus ";
+		callback = dev->bus->pm->prepare;
+	}
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "preparing driver ";
+		callback = dev->driver->pm->prepare;
+	}
+
+	if (callback) {
+		error = callback(dev);
+		suspend_report_result(callback, error);
 	}
 
- End:
 	device_unlock(dev);
 
 	return error;
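The rework splits each PM phase into a pure lookup (pm_op()/pm_noirq_op() now just return a callback pointer) and a single execution helper (dpm_run_callback()) that handles debug output, invocation, and error reporting once. Every phase then follows the same distilled control flow, sketched here as an illustration of the pattern rather than a literal copy of any one function:

	/* Sketch of the lookup/run split used in every phase above:
	 * the first subsystem callback found wins; otherwise the PM core
	 * falls back to the driver's own callback. */
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
	} /* else try type, class, bus, in that order ... */

	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);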
@@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_idle;
+
 	if (callback)
 		__rpm_callback(callback, dev);
 

@@ -413,6 +416,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_suspend;
+
 	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_ACTIVE);

@@ -633,6 +639,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_resume;
+
 	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_SUSPENDED);
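Runtime PM gets the same driver fallback: on a bus with no runtime PM ops of its own, a driver's runtime callbacks are now reached without any bus-level glue. A hedged driver-side sketch (baz_* names are hypothetical):

	/* Sketch: a driver's runtime callbacks, reached directly by the
	 * PM core when the subsystem provides none. */
	static int baz_runtime_suspend(struct device *dev)
	{
		/* gate clocks, drop power, etc. */
		return 0;
	}

	static int baz_runtime_resume(struct device *dev)
	{
		/* restore power and clocks */
		return 0;
	}

	static const struct dev_pm_ops baz_pm_ops = {
		SET_RUNTIME_PM_OPS(baz_runtime_suspend, baz_runtime_resume, NULL)
	};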
@@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
 
 	init_waitqueue_entry(&wait, current);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		add_wait_queue(&thread->wait_q, &wait);
 
@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 	return error_count;
 }
 
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool			done;
+	wait_queue_head_t	*wait;
+};
+
+static void dmatest_callback(void *arg)
 {
-	complete(completion);
+	struct dmatest_done *done = arg;
+
+	done->done = true;
+	wake_up_all(done->wait);
 }
 
 /*

@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
  */
 static int dmatest_func(void *data)
 {
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread	*thread = data;
+	struct dmatest_done	done = { .wait = &done_wait };
 	struct dma_chan		*chan;
 	const char		*thread_name;
 	unsigned int		src_off, dst_off, len;

@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
 	int			i;
 
 	thread_name = current->comm;
-	set_freezable_with_signal();
+	set_freezable();
 
 	ret = -ENOMEM;
 

@@ -306,9 +317,6 @@ static int dmatest_func(void *data)
 		struct dma_async_tx_descriptor *tx = NULL;
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
-		struct completion cmp;
-		unsigned long start, tmo, end = 0 /* compiler... */;
-		bool reload = true;
 		u8 align = 0;
 
 		total_tests++;

@@ -391,9 +399,9 @@ static int dmatest_func(void *data)
 			continue;
 		}
 
-		init_completion(&cmp);
+		done.done = false;
 		tx->callback = dmatest_callback;
-		tx->callback_param = &cmp;
+		tx->callback_param = &done;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {

@@ -407,20 +415,20 @@ static int dmatest_func(void *data)
 		}
 		dma_async_issue_pending(chan);
 
-		do {
-			start = jiffies;
-			if (reload)
-				end = start + msecs_to_jiffies(timeout);
-			else if (end <= start)
-				end = start + 1;
-			tmo = wait_for_completion_interruptible_timeout(&cmp,
-								end - start);
-			reload = try_to_freeze();
-		} while (tmo == -ERESTARTSYS);
+		wait_event_freezable_timeout(done_wait, done.done,
+					     msecs_to_jiffies(timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (tmo == 0) {
+		if (!done.done) {
+			/*
+			 * We're leaving the timed out dma operation with
+			 * dangling pointer to done_wait.  To make this
+			 * correct, we'll need to allocate wait_done for
+			 * each test iteration and perform "who's gonna
+			 * free it this time?" dancing.  For now, just
+			 * leave it dangling.
+			 */
 			pr_warning("%s: #%u: test timed out\n",
 				   thread_name, total_tests - 1);
 			failed_tests++;
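A struct completion cannot be waited on freezably, so the test open-codes one from a boolean flag plus a wait queue; wait_event_freezable_timeout() can then put the thread into the freezer while it waits. The minimal shape of this "poor man's completion" pattern, as a sketch using the same names as the diff:

	/* Sketch: a done flag plus a wait queue head that
	 * wait_event_freezable_timeout() can sleep on. */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_done done = { .wait = &done_wait };

	done.done = false;
	/* ... submit the async operation; its completion callback sets
	 * done.done = true and calls wake_up_all(done.wait) ... */
	wait_event_freezable_timeout(done_wait, done.done,
				     msecs_to_jiffies(timeout));
	if (!done.done)
		/* timed out */;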
@@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
 	static const unsigned max_i2c_errors = 100;
 	int ret;
 
-	current->flags |= PF_NOFREEZE;
-
 	while (!kthread_should_stop()) {
 		int i;
 		union {
|
@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
|
||||||
|
|
||||||
write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
|
write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
|
||||||
|
|
||||||
refrigerator();
|
try_to_freeze();
|
||||||
|
|
||||||
if (change_speed(stir, stir->speed))
|
if (change_speed(stir, stir->speed))
|
||||||
break;
|
break;
|
||||||
|
|
|
@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
 	u32 poll_mask, event_mask;
 	unsigned int si, so;
 	unsigned long t;
-	unsigned int change_detector, must_reset;
+	unsigned int change_detector;
 	unsigned int poll_freq;
+	bool was_frozen;
 
 	mutex_lock(&hotkey_thread_mutex);
 

@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
 			t = 100;	/* should never happen... */
 		}
 		t = msleep_interruptible(t);
-		if (unlikely(kthread_should_stop()))
+		if (unlikely(kthread_freezable_should_stop(&was_frozen)))
 			break;
-		must_reset = try_to_freeze();
-		if (t > 0 && !must_reset)
+
+		if (t > 0 && !was_frozen)
 			continue;
 
 		mutex_lock(&hotkey_thread_data_mutex);
-		if (must_reset || hotkey_config_change != change_detector) {
+		if (was_frozen || hotkey_config_change != change_detector) {
 			/* forget old state on thaw or config change */
 			si = so;
 			t = 0;

@@ -2528,10 +2529,6 @@ exit:
 static void hotkey_poll_stop_sync(void)
 {
 	if (tpacpi_hotkey_task) {
-		if (frozen(tpacpi_hotkey_task) ||
-		    freezing(tpacpi_hotkey_task))
-			thaw_process(tpacpi_hotkey_task);
-
 		kthread_stop(tpacpi_hotkey_task);
 		tpacpi_hotkey_task = NULL;
 		mutex_lock(&hotkey_thread_mutex);
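kthread_freezable_should_stop() folds the stop check and the freeze into one call and reports whether the thread was actually frozen, replacing the separate kthread_should_stop()/try_to_freeze() pair (and making the manual thaw_process() dance on shutdown unnecessary, since a frozen kthread can now be stopped directly). The resulting loop shape, sketched with a hypothetical poll_interval:

	/* Sketch: was_frozen tells the thread to resynchronize its state
	 * after a thaw instead of acting on stale pre-freeze data. */
	bool was_frozen;

	while (true) {
		msleep_interruptible(poll_interval);
		if (kthread_freezable_should_stop(&was_frozen))
			break;
		if (was_frozen)
			/* forget stale state accumulated before the freeze */;
		/* ... poll hardware ... */
	}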
@@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
 	struct rtsx_chip *chip = dev->chip;
 	struct Scsi_Host *host = rtsx_to_host(dev);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		if (wait_for_completion_interruptible(&dev->cmnd_ready))
 			break;
@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
|
||||||
|
|
||||||
dev_dbg(dev, "device found\n");
|
dev_dbg(dev, "device found\n");
|
||||||
|
|
||||||
set_freezable_with_signal();
|
set_freezable();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Wait for the timeout to expire or for a disconnect
|
* Wait for the timeout to expire or for a disconnect
|
||||||
*
|
*
|
||||||
|
@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
|
||||||
* fail to freeze, but we can't be non-freezable either. Nor can
|
* fail to freeze, but we can't be non-freezable either. Nor can
|
||||||
* khubd freeze while waiting for scanning to complete as it may
|
* khubd freeze while waiting for scanning to complete as it may
|
||||||
* hold the device lock, causing a hang when suspending devices.
|
* hold the device lock, causing a hang when suspending devices.
|
||||||
* So we request a fake signal when freezing and use
|
* So instead of using wait_event_freezable(), explicitly test
|
||||||
* interruptible sleep to kick us out of our wait early when
|
* for (DONT_SCAN || freezing) in interruptible wait and proceed
|
||||||
* freezing happens.
|
* if any of DONT_SCAN, freezing or timeout has happened.
|
||||||
*/
|
*/
|
||||||
if (delay_use > 0) {
|
if (delay_use > 0) {
|
||||||
dev_dbg(dev, "waiting for device to settle "
|
dev_dbg(dev, "waiting for device to settle "
|
||||||
"before scanning\n");
|
"before scanning\n");
|
||||||
wait_event_interruptible_timeout(us->delay_wait,
|
wait_event_interruptible_timeout(us->delay_wait,
|
||||||
test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
|
test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
|
||||||
delay_use * HZ);
|
freezing(current), delay_use * HZ);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* If the device is still connected, perform the scanning */
|
/* If the device is still connected, perform the scanning */
|
||||||
|
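
Note: with set_freezable_with_signal() gone, a freezable task can no longer rely on a fake signal to interrupt its sleep, so the scan thread folds freezing(current) into the wait condition itself. The same wait shape in isolation, as a hedged sketch; the helper and its parameters are placeholders, not part of the driver:

```c
#include <linux/wait.h>
#include <linux/freezer.h>

/* Sketch: wake on the real event, on a pending freeze, or on timeout.
 * Without freezing(current) in the condition, this sleep could stall
 * freeze_processes() for the full timeout. */
static long example_settle_wait(wait_queue_head_t *wq, unsigned long *flags,
				int delay)
{
	return wait_event_interruptible_timeout(*wq,
			test_bit(0, flags) || freezing(current),
			delay * HZ);
}
```
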
@@ -334,7 +334,7 @@ again:
 		if (freezing(current)) {
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			spin_unlock_irq(&worker->lock);
 			if (!kthread_should_stop()) {
@@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)
 			btrfs_run_defrag_inodes(root->fs_info);
 		}
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop())
 				schedule();
@@ -1635,9 +1633,7 @@ sleep:
 		wake_up_process(root->fs_info->cleaner_kthread);
 		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
 			    !btrfs_transaction_blocked(root->fs_info))
@@ -2883,8 +2883,7 @@ cont_thread:
 		}
 		mutex_unlock(&eli->li_list_mtx);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
 
 		cur = jiffies;
 		if ((time_after_eq(cur, next_wakeup)) ||
@@ -937,7 +937,7 @@ int bdi_writeback_thread(void *data)
 
 	trace_writeback_thread_start(bdi);
 
-	while (!kthread_should_stop()) {
+	while (!kthread_freezable_should_stop(NULL)) {
 		/*
 		 * Remove own delayed wake-up timer, since we are already awake
 		 * and we'll take care of the preriodic write-back.
@@ -967,8 +967,6 @@ int bdi_writeback_thread(void *data)
 			 */
 			schedule();
 		}
-
-		try_to_freeze();
 	}
 
 	/* Flush any work that raced with us exiting */
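
Note: because try_to_freeze() now returns true when the task was frozen, the old three-way "if freezing, refrigerate, else sleep" branching collapses: a false return means no freeze was pending and the thread may simply go to sleep. A condensed fragment of the resulting idiom, as adopted by the btrfs threads above; the final state reset is an assumed completion, not shown in these hunks:

```c
if (!try_to_freeze()) {
	set_current_state(TASK_INTERRUPTIBLE);
	if (!kthread_should_stop())
		schedule();
	__set_current_state(TASK_RUNNING);
}
```
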
@@ -951,8 +951,8 @@ int gfs2_logd(void *data)
 			wake_up(&sdp->sd_log_waitq);
 
 		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
-		if (freezing(current))
-			refrigerator();
+
+		try_to_freeze();
 
 		do {
 			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
@@ -1427,8 +1427,8 @@ int gfs2_quotad(void *data)
 		/* Check for & recover partially truncated inodes */
 		quotad_check_trunc_list(sdp);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
+
 		t = min(quotad_timeo, statfs_timeo);
 
 		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
@@ -166,7 +166,7 @@ loop:
 			 */
 			jbd_debug(1, "Now suspending kjournald\n");
 			spin_unlock(&journal->j_state_lock);
-			refrigerator();
+			try_to_freeze();
 			spin_lock(&journal->j_state_lock);
 		} else {
 			/*
@@ -173,7 +173,7 @@ loop:
 			 */
 			jbd_debug(1, "Now suspending kjournald2\n");
 			write_unlock(&journal->j_state_lock);
-			refrigerator();
+			try_to_freeze();
 			write_lock(&journal->j_state_lock);
 		} else {
 			/*
@@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)
 
 		if (freezing(current)) {
 			spin_unlock_irq(&log_redrive_lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&log_redrive_lock);
@@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)
 
 		if (freezing(current)) {
 			LAZY_UNLOCK(flags);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			DECLARE_WAITQUEUE(wq, current);
 
@@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)
 
 		if (freezing(current)) {
 			TXN_UNLOCK();
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			TXN_UNLOCK();
@@ -38,6 +38,7 @@
 #include <linux/nfs_xdr.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/freezer.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -77,7 +78,7 @@ int nfs_wait_bit_killable(void *word)
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	schedule();
+	freezable_schedule();
 	return 0;
 }
 
@@ -17,6 +17,7 @@
 #include <linux/nfs_page.h>
 #include <linux/lockd/bind.h>
 #include <linux/nfs_mount.h>
+#include <linux/freezer.h>
 
 #include "iostat.h"
 #include "internal.h"
@@ -32,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EJUKEBOX && res != -EKEYEXPIRED)
 			break;
-		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!fatal_signal_pending(current));
 	return res;
@@ -55,6 +55,7 @@
 #include <linux/sunrpc/bc_xprt.h>
 #include <linux/xattr.h>
 #include <linux/utsname.h>
+#include <linux/freezer.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -243,7 +244,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 		*timeout = NFS4_POLL_RETRY_MIN;
 	if (*timeout > NFS4_POLL_RETRY_MAX)
 		*timeout = NFS4_POLL_RETRY_MAX;
-	schedule_timeout_killable(*timeout);
+	freezable_schedule_timeout_killable(*timeout);
 	if (fatal_signal_pending(current))
 		res = -ERESTARTSYS;
 	*timeout <<= 1;
@@ -3958,7 +3959,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-	schedule_timeout_killable(timeout);
+	freezable_schedule_timeout_killable(timeout);
 	timeout <<= 1;
 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
 		return NFS4_LOCK_MAXTIMEOUT;
@@ -41,6 +41,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/nfs_page.h>
 #include <linux/lockd/bind.h>
+#include <linux/freezer.h>
 #include "internal.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PROC
@@ -59,7 +60,7 @@ nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EKEYEXPIRED)
 			break;
-		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!fatal_signal_pending(current));
 	return res;
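
Note: the NFS conversions depend on the freezable_schedule*() wrappers introduced in the freezer.h hunks further down. What the timeout variant amounts to, shown as a fragment: the freezer skips the task while it sleeps, and the task settles any pending freeze as it wakes, so a TASK_KILLABLE sleep no longer blocks the freezer:

```c
freezer_do_not_count();			/* sets PF_FREEZER_SKIP on current */
schedule_timeout_killable(timeout);
freezer_count();			/* clears the flag, then try_to_freeze() */
```
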
@@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
 
 		if (freezing(current)) {
 			spin_unlock(&sci->sc_state_lock);
-			refrigerator();
+			try_to_freeze();
 			spin_lock(&sci->sc_state_lock);
 		} else {
 			DEFINE_WAIT(wait);
@@ -1703,7 +1703,7 @@ xfsbufd(
 
 		if (unlikely(freezing(current))) {
 			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
 		}
@@ -5,71 +5,58 @@
 
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
+extern bool pm_freezing;		/* PM freezing in effect */
+extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
 
 /*
  * Check if a process has been frozen
  */
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
 {
 	return p->flags & PF_FROZEN;
 }
 
+extern bool freezing_slow_path(struct task_struct *p);
+
 /*
  * Check if there is a request to freeze a process
  */
-static inline int freezing(struct task_struct *p)
-{
-	return test_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
-	set_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void clear_freeze_flag(struct task_struct *p)
-{
-	clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
 {
-	return !(p->flags & PF_FREEZER_NOSIG);
+	if (likely(!atomic_read(&system_freezing_cnt)))
+		return false;
+	return freezing_slow_path(p);
 }
 
 /* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
 
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
 {
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
+	might_sleep();
+	if (likely(!freezing(current)))
+		return false;
+	return __refrigerator(false);
 }
 
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
 {
-	return 0;
+	return false;
 }
 #endif /* !CONFIG_CGROUP_FREEZER */
 
@@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
  * appropriately in case the child has exited before the freezing of tasks is
  * complete.  However, we don't want kernel threads to be frozen in unexpected
  * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
- * parents.  Fortunately, in the ____call_usermodehelper() case the parent won't
- * really block freeze_processes(), since ____call_usermodehelper() (the child)
- * does a little before exec/exit and it can't be frozen before waking up the
- * parent.
+ * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
+ * parent won't really block freeze_processes(), since ____call_usermodehelper()
+ * (the child) does a little before exec/exit and it can't be frozen before
+ * waking up the parent.
 */
 
-/*
- * If the current task is a user space one, tell the freezer not to count it as
- * freezable.
- */
+/* Tell the freezer not to count the current task as freezable. */
 static inline void freezer_do_not_count(void)
 {
-	if (current->mm)
-		current->flags |= PF_FREEZER_SKIP;
+	current->flags |= PF_FREEZER_SKIP;
 }
 
 /*
- * If the current task is a user space one, tell the freezer to count it as
- * freezable again and try to freeze it.
+ * Tell the freezer to count the current task as freezable again and try to
+ * freeze it.
 */
 static inline void freezer_count(void)
 {
-	if (current->mm) {
-		current->flags &= ~PF_FREEZER_SKIP;
-		try_to_freeze();
-	}
+	current->flags &= ~PF_FREEZER_SKIP;
+	try_to_freeze();
 }
 
 /*
@@ -118,21 +99,27 @@ static inline int freezer_should_skip(struct task_struct *p)
 }
 
 /*
- * Tell the freezer that the current task should be frozen by it
+ * These macros are intended to be used whenever you want allow a task that's
+ * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note
+ * that neither return any clear indication of whether a freeze event happened
+ * while in this function.
 */
-static inline void set_freezable(void)
-{
-	current->flags &= ~PF_NOFREEZE;
-}
 
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
-	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
+/* Like schedule(), but should not block the freezer. */
+#define freezable_schedule()						\
+({									\
+	freezer_do_not_count();						\
+	schedule();							\
+	freezer_count();						\
+})
+
+/* Like schedule_timeout_killable(), but should not block the freezer. */
+#define freezable_schedule_timeout_killable(timeout)			\
+({									\
+	freezer_do_not_count();						\
+	schedule_timeout_killable(timeout);				\
+	freezer_count();						\
+})
 
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
@@ -152,47 +139,51 @@ static inline void set_freezable_with_signal(void)
 #define wait_event_freezable(wq, condition)				\
 ({									\
 	int __retval;							\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible(wq,			\
 				(condition) || freezing(current));	\
-		if (__retval && !freezing(current))			\
+		if (__retval || (condition))				\
 			break;						\
-		else if (!(condition))					\
-			__retval = -ERESTARTSYS;			\
-	} while (try_to_freeze());					\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
 
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 ({									\
 	long __retval = timeout;					\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible_timeout(wq,		\
 				(condition) || freezing(current),	\
 				__retval);				\
-	} while (try_to_freeze());					\
+		if (__retval <= 0 || (condition))			\
+			break;						\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
-#else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
 
-static inline void refrigerator(void) {}
+#else /* !CONFIG_FREEZER */
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+static inline void __thaw_task(struct task_struct *t) {}
+
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
 
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
+
+#define freezable_schedule()  schedule()
+
+#define freezable_schedule_timeout_killable(timeout)			\
+	schedule_timeout_killable(timeout)
 
 #define wait_event_freezable(wq, condition)				\
 		wait_event_interruptible(wq, condition)
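
Note: a sketch of how a caller sees the reworked wait_event_freezable(): it sleeps interruptibly, freezes transparently whenever required, and returns non-zero only for a genuine signal. The example_* names and the device structure below are hypothetical, invented for illustration:

```c
#include <linux/kthread.h>
#include <linux/freezer.h>

static int example_consumer(void *arg)
{
	struct example_dev *dev = arg;	/* hypothetical device structure */

	set_freezable();
	while (!kthread_should_stop()) {
		if (wait_event_freezable(dev->waitq, example_has_work(dev)))
			continue;	/* interrupted; recheck the stop condition */
		example_process(dev);	/* hypothetical work item */
	}
	return 0;
}
```
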
@@ -117,5 +117,7 @@ extern void usermodehelper_init(void);
 extern int usermodehelper_disable(void);
 extern void usermodehelper_enable(void);
 extern bool usermodehelper_is_disabled(void);
+extern void read_lock_usermodehelper(void);
+extern void read_unlock_usermodehelper(void);
 
 #endif /* __LINUX_KMOD_H__ */
@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
 int kthread_should_stop(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
 
 int kthreadd(void *unused);
@@ -264,62 +264,34 @@ static inline char *early_platform_driver_setup_func(void)		\
 }
 #endif /* MODULE */
 
-#ifdef CONFIG_PM_SLEEP
-extern int platform_pm_prepare(struct device *dev);
-extern void platform_pm_complete(struct device *dev);
-#else
-#define platform_pm_prepare	NULL
-#define platform_pm_complete	NULL
-#endif
-
 #ifdef CONFIG_SUSPEND
 extern int platform_pm_suspend(struct device *dev);
-extern int platform_pm_suspend_noirq(struct device *dev);
 extern int platform_pm_resume(struct device *dev);
-extern int platform_pm_resume_noirq(struct device *dev);
 #else
 #define platform_pm_suspend		NULL
 #define platform_pm_resume		NULL
-#define platform_pm_suspend_noirq	NULL
-#define platform_pm_resume_noirq	NULL
 #endif
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 extern int platform_pm_freeze(struct device *dev);
-extern int platform_pm_freeze_noirq(struct device *dev);
 extern int platform_pm_thaw(struct device *dev);
-extern int platform_pm_thaw_noirq(struct device *dev);
 extern int platform_pm_poweroff(struct device *dev);
-extern int platform_pm_poweroff_noirq(struct device *dev);
 extern int platform_pm_restore(struct device *dev);
-extern int platform_pm_restore_noirq(struct device *dev);
 #else
 #define platform_pm_freeze		NULL
 #define platform_pm_thaw		NULL
 #define platform_pm_poweroff		NULL
 #define platform_pm_restore		NULL
-#define platform_pm_freeze_noirq	NULL
-#define platform_pm_thaw_noirq		NULL
-#define platform_pm_poweroff_noirq	NULL
-#define platform_pm_restore_noirq	NULL
 #endif
 
 #ifdef CONFIG_PM_SLEEP
 #define USE_PLATFORM_PM_SLEEP_OPS \
-	.prepare = platform_pm_prepare, \
-	.complete = platform_pm_complete, \
 	.suspend = platform_pm_suspend, \
 	.resume = platform_pm_resume, \
 	.freeze = platform_pm_freeze, \
 	.thaw = platform_pm_thaw, \
 	.poweroff = platform_pm_poweroff, \
-	.restore = platform_pm_restore, \
-	.suspend_noirq = platform_pm_suspend_noirq, \
-	.resume_noirq = platform_pm_resume_noirq, \
-	.freeze_noirq = platform_pm_freeze_noirq, \
-	.thaw_noirq = platform_pm_thaw_noirq, \
-	.poweroff_noirq = platform_pm_poweroff_noirq, \
-	.restore_noirq = platform_pm_restore_noirq,
+	.restore = platform_pm_restore,
 #else
 #define USE_PLATFORM_PM_SLEEP_OPS
 #endif
@@ -300,19 +300,6 @@ const struct dev_pm_ops name = { \
 	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
 }
 
-/*
- * Use this for subsystems (bus types, device types, device classes) that don't
- * need any special suspend/resume handling in addition to invoking the PM
- * callbacks provided by device drivers supporting both the system sleep PM and
- * runtime PM, make the pm member point to generic_subsys_pm_ops.
- */
-#ifdef CONFIG_PM
-extern struct dev_pm_ops generic_subsys_pm_ops;
-#define GENERIC_SUBSYS_PM_OPS	(&generic_subsys_pm_ops)
-#else
-#define GENERIC_SUBSYS_PM_OPS	NULL
-#endif
-
 /**
 * PM_EVENT_ messages
 *
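
Note: with the forward-only platform stubs removed and the PM core now calling the driver's own callback when the subsystem supplies none, an ordinary platform driver just publishes its dev_pm_ops. A hedged sketch of what that looks like; the driver, its name, and both callbacks are invented for illustration:

```c
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* quiesce the hypothetical device */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* bring the hypothetical device back up */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.pm	= &example_pm_ops,
	},
};
```
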
@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
 				((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-				 (task->flags & PF_FREEZING) == 0)
+				 (task->flags & PF_FROZEN) == 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -1772,7 +1772,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
-#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -1788,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
 * Only the _current_ task can read/write to tsk->flags, but other
@@ -6,6 +6,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/mm.h>
+#include <linux/freezer.h>
 #include <asm/errno.h>
 
 #ifdef CONFIG_VT
@@ -331,6 +332,8 @@ static inline bool system_entering_hibernation(void) { return false; }
 #define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
 #define PM_POST_RESTORE		0x0006 /* Restore failed */
 
+extern struct mutex pm_mutex;
+
 #ifdef CONFIG_PM_SLEEP
 void save_processor_state(void);
 void restore_processor_state(void);
@@ -351,6 +354,19 @@ extern bool events_check_enabled;
 extern bool pm_wakeup_pending(void);
 extern bool pm_get_wakeup_count(unsigned int *count);
 extern bool pm_save_wakeup_count(unsigned int count);
 
+static inline void lock_system_sleep(void)
+{
+	freezer_do_not_count();
+	mutex_lock(&pm_mutex);
+}
+
+static inline void unlock_system_sleep(void)
+{
+	mutex_unlock(&pm_mutex);
+	freezer_count();
+}
+
 #else /* !CONFIG_PM_SLEEP */
 
 static inline int register_pm_notifier(struct notifier_block *nb)
@@ -366,28 +382,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 #define pm_notifier(fn, pri)	do { (void)(fn); } while (0)
 
 static inline bool pm_wakeup_pending(void) { return false; }
-#endif /* !CONFIG_PM_SLEEP */
-
-extern struct mutex pm_mutex;
 
-#ifndef CONFIG_HIBERNATE_CALLBACKS
 static inline void lock_system_sleep(void) {}
 static inline void unlock_system_sleep(void) {}
 
-#else
-
-/* Let some subsystems like memory hotadd exclude hibernation */
-
-static inline void lock_system_sleep(void)
-{
-	mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
-	mutex_unlock(&pm_mutex);
-}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
 /*
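
Note: lock_system_sleep() is now usable from any task without risking a freezer deadlock: PF_FREEZER_SKIP, taken via freezer_do_not_count(), makes the freezer skip a task that is merely waiting for pm_mutex, and freezer_count() settles any pending freeze on unlock. Usage is unchanged for callers; a fragment, with the protected operation left abstract:

```c
/* Exclude system sleep around an operation such as memory hotplug;
 * preferred over taking pm_mutex directly, per this series. */
lock_system_sleep();
/* ... operation that must not race with suspend/hibernation ... */
unlock_system_sleep();
```
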
@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
 			    struct freezer, css);
 }
 
-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
+bool cgroup_freezing(struct task_struct *task)
 {
-	enum freezer_state state = task_freezer(task)->state;
-	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
-}
+	enum freezer_state state;
+	bool ret;
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
-{
-	int result;
-	task_lock(task);
-	result = __cgroup_freezing_or_frozen(task);
-	task_unlock(task);
-	return result;
+	rcu_read_lock();
+	state = task_freezer(task)->state;
+	ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
+	rcu_read_unlock();
+
+	return ret;
 }
 
 /*
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
 * freezer_can_attach():
 * cgroup_mutex (held by caller of can_attach)
 *
- * cgroup_freezing_or_frozen():
- * task->alloc_lock (to get task's cgroup)
- *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * freezer->lock
 * sighand->siglock (if the cgroup is freezing)
@@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;
 * write_lock css_set_lock (cgroup iterator start)
 * task->alloc_lock
 * read_lock css_set_lock (cgroup iterator start)
- * task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
+ * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
 * sighand->siglock
 */
 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
 static void freezer_destroy(struct cgroup_subsys *ss,
 			    struct cgroup *cgroup)
 {
-	kfree(cgroup_freezer(cgroup));
+	struct freezer *freezer = cgroup_freezer(cgroup);
+
+	if (freezer->state != CGROUP_THAWED)
+		atomic_dec(&system_freezing_cnt);
+	kfree(freezer);
 }
 
 /* task is frozen or will freeze immediately when next it gets woken */
@@ -184,13 +183,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 
 static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
-	rcu_read_lock();
-	if (__cgroup_freezing_or_frozen(tsk)) {
-		rcu_read_unlock();
-		return -EBUSY;
-	}
-	rcu_read_unlock();
-	return 0;
+	return cgroup_freezing(tsk) ? -EBUSY : 0;
 }
 
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -220,7 +213,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 
 	/* Locking avoids race with FREEZING -> THAWED transitions. */
 	if (freezer->state == CGROUP_FREEZING)
-		freeze_task(task, true);
+		freeze_task(task);
 	spin_unlock_irq(&freezer->lock);
 }
 
@@ -238,7 +231,7 @@ static void update_if_frozen(struct cgroup *cgroup,
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
 		ntotal++;
-		if (is_task_frozen_enough(task))
+		if (freezing(task) && is_task_frozen_enough(task))
 			nfrozen++;
 	}
 
@@ -286,10 +279,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	struct task_struct *task;
 	unsigned int num_cant_freeze_now = 0;
 
-	freezer->state = CGROUP_FREEZING;
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
-		if (!freeze_task(task, true))
+		if (!freeze_task(task))
 			continue;
 		if (is_task_frozen_enough(task))
 			continue;
@@ -307,12 +299,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	struct task_struct *task;
 
 	cgroup_iter_start(cgroup, &it);
-	while ((task = cgroup_iter_next(cgroup, &it))) {
-		thaw_process(task);
-	}
+	while ((task = cgroup_iter_next(cgroup, &it)))
+		__thaw_task(task);
 	cgroup_iter_end(cgroup, &it);
-
-	freezer->state = CGROUP_THAWED;
 }
 
 static int freezer_change_state(struct cgroup *cgroup,
@@ -326,20 +315,24 @@ static int freezer_change_state(struct cgroup *cgroup,
 	spin_lock_irq(&freezer->lock);
 
 	update_if_frozen(cgroup, freezer);
-	if (goal_state == freezer->state)
-		goto out;
 
 	switch (goal_state) {
 	case CGROUP_THAWED:
+		if (freezer->state != CGROUP_THAWED)
+			atomic_dec(&system_freezing_cnt);
+		freezer->state = CGROUP_THAWED;
 		unfreeze_cgroup(cgroup, freezer);
 		break;
 	case CGROUP_FROZEN:
+		if (freezer->state == CGROUP_THAWED)
+			atomic_inc(&system_freezing_cnt);
+		freezer->state = CGROUP_FREEZING;
 		retval = try_to_freeze_cgroup(cgroup, freezer);
 		break;
 	default:
 		BUG();
 	}
-out:
+
 	spin_unlock_irq(&freezer->lock);
 
 	return retval;
@@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk)
 	tsk->mm = NULL;
 	up_read(&mm->mmap_sem);
 	enter_lazy_tlb(mm, current);
-	/* We don't want this task to be frozen prematurely */
-	clear_freeze_flag(tsk);
 	task_unlock(tsk);
 	mm_update_next_owner(mm);
 	mmput(mm);
@@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code)
 	exit_rcu();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
+	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
 	schedule();
 	BUG();
 	/* Avoid "noreturn function does return". */
@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	new_flags |= PF_FORKNOEXEC;
 	new_flags |= PF_STARTING;
 	p->flags = new_flags;
-	clear_freeze_flag(p);
 }
 
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -9,101 +9,114 @@
 #include <linux/export.h>
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 
-/*
- * freezing is complete, mark current process as frozen
+/* total number of freezing conditions in effect */
+atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+EXPORT_SYMBOL(system_freezing_cnt);
+
+/* indicate whether PM freezing is in effect, protected by pm_mutex */
+bool pm_freezing;
+bool pm_nosig_freezing;
+
+/* protects freezing and frozen transitions */
+static DEFINE_SPINLOCK(freezer_lock);
+
+/**
+ * freezing_slow_path - slow path for testing whether a task needs to be frozen
+ * @p: task to be tested
+ *
+ * This function is called by freezing() if system_freezing_cnt isn't zero
+ * and tests whether @p needs to enter and stay in frozen state.  Can be
+ * called under any context.  The freezers are responsible for ensuring the
+ * target tasks see the updated state.
 */
-static inline void frozen_process(void)
+bool freezing_slow_path(struct task_struct *p)
 {
-	if (!unlikely(current->flags & PF_NOFREEZE)) {
-		current->flags |= PF_FROZEN;
-		smp_wmb();
-	}
-	clear_freeze_flag(current);
+	if (p->flags & PF_NOFREEZE)
+		return false;
+
+	if (pm_nosig_freezing || cgroup_freezing(p))
+		return true;
+
+	if (pm_freezing && !(p->flags & PF_KTHREAD))
+		return true;
+
+	return false;
 }
+EXPORT_SYMBOL(freezing_slow_path);
 
 /* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
+bool __refrigerator(bool check_kthr_stop)
 {
 	/* Hmm, should we be allowed to suspend when there are realtime
 	   processes around? */
-	long save;
+	bool was_frozen = false;
+	long save = current->state;
 
-	task_lock(current);
-	if (freezing(current)) {
-		frozen_process();
-		task_unlock(current);
-	} else {
-		task_unlock(current);
-		return;
-	}
-	save = current->state;
 	pr_debug("%s entered refrigerator\n", current->comm);
 
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending(); /* We sent fake signal, clean it up */
-	spin_unlock_irq(&current->sighand->siglock);
-
-	/* prevent accounting of that task to load */
-	current->flags |= PF_FREEZING;
-
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (!frozen(current))
+
+		spin_lock_irq(&freezer_lock);
+		current->flags |= PF_FROZEN;
+		if (!freezing(current) ||
+		    (check_kthr_stop && kthread_should_stop()))
+			current->flags &= ~PF_FROZEN;
+		spin_unlock_irq(&freezer_lock);
+
+		if (!(current->flags & PF_FROZEN))
 			break;
+		was_frozen = true;
 		schedule();
 	}
 
-	/* Remove the accounting blocker */
-	current->flags &= ~PF_FREEZING;
-
 	pr_debug("%s left refrigerator\n", current->comm);
-	__set_current_state(save);
+
+	/*
+	 * Restore saved task state before returning.  The mb'd version
+	 * needs to be used; otherwise, it might silently break
+	 * synchronization which depends on ordered task state change.
+	 */
+	set_current_state(save);
+
+	return was_frozen;
 }
-EXPORT_SYMBOL(refrigerator);
+EXPORT_SYMBOL(__refrigerator);
 
 static void fake_signal_wake_up(struct task_struct *p)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 0);
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	if (lock_task_sighand(p, &flags)) {
+		signal_wake_up(p, 0);
+		unlock_task_sighand(p, &flags);
+	}
 }
 
 /**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
- * @sig_only: if set, the request will only be sent if the task has the
- *	PF_FREEZER_NOSIG flag unset
- * Return value: 'false', if @sig_only is set and the task has
- *	PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
 *
- * The freeze request is sent by setting the tasks's TIF_FREEZE flag and
- * either sending a fake signal to it or waking it up, depending on whether
- * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task
- * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
- * TIF_FREEZE flag will not be set.
+ * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
+ * flag and either sending a fake signal to it or waking it up, depending
+ * on whether it has %PF_FREEZER_NOSIG set.
+ *
+ * RETURNS:
+ * %false, if @p is not freezing or already frozen; %true, otherwise
 */
-bool freeze_task(struct task_struct *p, bool sig_only)
+bool freeze_task(struct task_struct *p)
 {
-	/*
-	 * We first check if the task is freezing and next if it has already
-	 * been frozen to avoid the race with frozen_process() which first marks
-	 * the task as frozen and next clears its TIF_FREEZE.
-	 */
-	if (!freezing(p)) {
-		smp_rmb();
-		if (frozen(p))
-			return false;
+	unsigned long flags;
 
-		if (!sig_only || should_send_signal(p))
-			set_freeze_flag(p);
-		else
-			return false;
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (!freezing(p) || frozen(p)) {
+		spin_unlock_irqrestore(&freezer_lock, flags);
+		return false;
 	}
 
-	if (should_send_signal(p)) {
+	if (!(p->flags & PF_KTHREAD)) {
 		fake_signal_wake_up(p);
 		/*
 		 * fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
 		 * TASK_RUNNING transition can't race with task state
 		 * testing in try_to_freeze_tasks().
 		 */
-	} else if (sig_only) {
-		return false;
 	} else {
 		wake_up_state(p, TASK_INTERRUPTIBLE);
 	}
 
+	spin_unlock_irqrestore(&freezer_lock, flags);
 	return true;
 }
 
-void cancel_freezing(struct task_struct *p)
+void __thaw_task(struct task_struct *p)
 {
 	unsigned long flags;
 
-	if (freezing(p)) {
-		pr_debug("  clean up: %s\n", p->comm);
-		clear_freeze_flag(p);
-		spin_lock_irqsave(&p->sighand->siglock, flags);
-		recalc_sigpending_and_wake(p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	}
-}
-
-static int __thaw_process(struct task_struct *p)
-{
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		return 1;
-	}
-	clear_freeze_flag(p);
-	return 0;
-}
-
-/*
- * Wake up a frozen process
- *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails.  Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
- */
-int thaw_process(struct task_struct *p)
-{
-	task_lock(p);
-	if (__thaw_process(p) == 1) {
-		task_unlock(p);
-		wake_up_process(p);
-		return 1;
-	}
-	task_unlock(p);
-	return 0;
-}
-EXPORT_SYMBOL(thaw_process);
+	/*
+	 * Clear freezing and kick @p if FROZEN.  Clearing is guaranteed to
+	 * be visible to @p as waking up implies wmb.  Waking up inside
+	 * freezer_lock also prevents wakeups from leaking outside
+	 * refrigerator.
+	 */
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (frozen(p))
+		wake_up_process(p);
+	spin_unlock_irqrestore(&freezer_lock, flags);
+}
+
+/**
+ * set_freezable - make %current freezable
+ *
+ * Mark %current freezable and enter refrigerator if necessary.
+ */
+bool set_freezable(void)
+{
+	might_sleep();
+
+	/*
+	 * Modify flags while holding freezer_lock.  This ensures the
+	 * freezer notices that we aren't frozen yet or the freezing
+	 * condition is visible to try_to_freeze() below.
+	 */
+	spin_lock_irq(&freezer_lock);
+	current->flags &= ~PF_NOFREEZE;
+	spin_unlock_irq(&freezer_lock);
+
+	return try_to_freeze();
+}
+EXPORT_SYMBOL(set_freezable);
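
Note: set_freezable() now returns the result of an immediate try_to_freeze(), closing the window in which a freeze could begin between clearing PF_NOFREEZE and the thread's first freeze check. A minimal freezable kthread under the new API; the example_* names are hypothetical:

```c
#include <linux/kthread.h>
#include <linux/freezer.h>

static void example_do_work(void)
{
	/* hypothetical unit of work */
}

static int example_thread(void *unused)
{
	set_freezable();	/* may enter the refrigerator right here */

	while (!kthread_freezable_should_stop(NULL))
		example_do_work();
	return 0;
}
```
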
@@ -1523,7 +1523,7 @@ int kernel_kexec(void)
 
 #ifdef CONFIG_KEXEC_JUMP
 	if (kexec_image->preserve_context) {
-		mutex_lock(&pm_mutex);
+		lock_system_sleep();
 		pm_prepare_console();
 		error = freeze_processes();
 		if (error) {
@@ -1576,7 +1576,7 @@ int kernel_kexec(void)
 		thaw_processes();
 Restore_console:
 		pm_restore_console();
-		mutex_unlock(&pm_mutex);
+		unlock_system_sleep();
 	}
 #endif
 
@@ -34,6 +34,9 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/resource.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/rwsem.h>
 #include <asm/uaccess.h>
 
 #include <trace/events/module.h>
@@ -48,6 +51,7 @@ static struct workqueue_struct *khelper_wq;
 static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
 static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
 static DEFINE_SPINLOCK(umh_sysctl_lock);
+static DECLARE_RWSEM(umhelper_sem);
 
 #ifdef CONFIG_MODULES
 
@@ -273,6 +277,7 @@ static void __call_usermodehelper(struct work_struct *work)
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
+ * Should always be manipulated under umhelper_sem acquired for write.
 */
 static int usermodehelper_disabled = 1;
 
@@ -291,6 +296,18 @@ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
 */
 #define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
 
+void read_lock_usermodehelper(void)
+{
+	down_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+
+void read_unlock_usermodehelper(void)
+{
+	up_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+
 /**
 * usermodehelper_disable - prevent new helpers from being started
 */
@@ -298,8 +315,10 @@ int usermodehelper_disable(void)
 {
 	long retval;
 
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 1;
-	smp_mb();
+	up_write(&umhelper_sem);
+
 	/*
 	 * From now on call_usermodehelper_exec() won't start any new
 	 * helpers, so it is sufficient if running_helpers turns out to
@@ -312,7 +331,9 @@ int usermodehelper_disable(void)
 	if (retval)
 		return 0;
 
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 0;
+	up_write(&umhelper_sem);
 	return -EAGAIN;
 }
 
@@ -321,7 +342,9 @@ int usermodehelper_disable(void)
 */
 void usermodehelper_enable(void)
 {
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 0;
+	up_write(&umhelper_sem);
 }
 
 /**
|
|
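Note: the new umhelper_sem read-side API lets a caller sample usermodehelper_disabled and act on the result atomically with respect to usermodehelper_disable(). A sketch of such a caller; the function name and helper arguments are illustrative.

static int run_helper_if_allowed(char *path, char **argv, char **envp)
{
	int ret = -EBUSY;

	read_lock_usermodehelper();
	if (!usermodehelper_is_disabled())
		ret = call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);
	read_unlock_usermodehelper();

	return ret;
}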
@@ -58,6 +58,31 @@ int kthread_should_stop(void)
 }
 EXPORT_SYMBOL(kthread_should_stop);

+/**
+ * kthread_freezable_should_stop - should this freezable kthread return now?
+ * @was_frozen: optional out parameter, indicates whether %current was frozen
+ *
+ * kthread_should_stop() for freezable kthreads, which will enter
+ * refrigerator if necessary.  This function is safe from kthread_stop() /
+ * freezer deadlock and freezable kthreads should use this function instead
+ * of calling try_to_freeze() directly.
+ */
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+	bool frozen = false;
+
+	might_sleep();
+
+	if (unlikely(freezing(current)))
+		frozen = __refrigerator(true);
+
+	if (was_frozen)
+		*was_frozen = frozen;
+
+	return kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+
 /**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
 	set_cpus_allowed_ptr(tsk, cpu_all_mask);
 	set_mems_allowed(node_states[N_HIGH_MEMORY]);

-	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+	current->flags |= PF_NOFREEZE;

 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
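Note: kthread_freezable_should_stop() folds the freeze check into the usual stop test, which is what removes the kthread_stop()-vs-freezer deadlock. A sketch of the intended main-loop usage; the worker body is illustrative.

static int example_freezable_worker(void *unused)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("resumed from the refrigerator\n");
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}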
@@ -43,8 +43,6 @@ int in_suspend __nosavedata;
 enum {
 	HIBERNATION_INVALID,
 	HIBERNATION_PLATFORM,
-	HIBERNATION_TEST,
-	HIBERNATION_TESTPROC,
 	HIBERNATION_SHUTDOWN,
 	HIBERNATION_REBOOT,
 	/* keep last */
@@ -55,7 +53,7 @@ enum {

 static int hibernation_mode = HIBERNATION_SHUTDOWN;

-static bool freezer_test_done;
+bool freezer_test_done;

 static const struct platform_hibernation_ops *hibernation_ops;

@@ -71,14 +69,14 @@ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
 		WARN_ON(1);
 		return;
 	}
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	hibernation_ops = ops;
 	if (ops)
 		hibernation_mode = HIBERNATION_PLATFORM;
 	else if (hibernation_mode == HIBERNATION_PLATFORM)
 		hibernation_mode = HIBERNATION_SHUTDOWN;

-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 }

 static bool entering_platform_hibernation;
@@ -96,15 +94,6 @@ static void hibernation_debug_sleep(void)
 	mdelay(5000);
 }

-static int hibernation_testmode(int mode)
-{
-	if (hibernation_mode == mode) {
-		hibernation_debug_sleep();
-		return 1;
-	}
-	return 0;
-}
-
 static int hibernation_test(int level)
 {
 	if (pm_test_level == level) {
@@ -114,7 +103,6 @@ static int hibernation_test(int level)
 	return 0;
 }
 #else /* !CONFIG_PM_DEBUG */
-static int hibernation_testmode(int mode) { return 0; }
 static int hibernation_test(int level) { return 0; }
 #endif /* !CONFIG_PM_DEBUG */

@@ -278,8 +266,7 @@ static int create_image(int platform_mode)
 		goto Platform_finish;

 	error = disable_nonboot_cpus();
-	if (error || hibernation_test(TEST_CPUS)
-	    || hibernation_testmode(HIBERNATION_TEST))
+	if (error || hibernation_test(TEST_CPUS))
 		goto Enable_cpus;

 	local_irq_disable();
@@ -333,7 +320,7 @@ static int create_image(int platform_mode)
 */
 int hibernation_snapshot(int platform_mode)
 {
-	pm_message_t msg = PMSG_RECOVER;
+	pm_message_t msg;
 	int error;

 	error = platform_begin(platform_mode);
@@ -349,8 +336,7 @@ int hibernation_snapshot(int platform_mode)
 	if (error)
 		goto Cleanup;

-	if (hibernation_test(TEST_FREEZER) ||
-		hibernation_testmode(HIBERNATION_TESTPROC)) {
+	if (hibernation_test(TEST_FREEZER)) {

 		/*
 		 * Indicate to the caller that we are returning due to a
@@ -362,26 +348,26 @@ int hibernation_snapshot(int platform_mode)

 	error = dpm_prepare(PMSG_FREEZE);
 	if (error) {
-		dpm_complete(msg);
+		dpm_complete(PMSG_RECOVER);
 		goto Cleanup;
 	}

 	suspend_console();
 	pm_restrict_gfp_mask();

 	error = dpm_suspend(PMSG_FREEZE);
-	if (error)
-		goto Recover_platform;

-	if (hibernation_test(TEST_DEVICES))
-		goto Recover_platform;
+	if (error || hibernation_test(TEST_DEVICES))
+		platform_recover(platform_mode);
+	else
+		error = create_image(platform_mode);

-	error = create_image(platform_mode);
 	/*
-	 * Control returns here (1) after the image has been created or the
+	 * In the case that we call create_image() above, the control
+	 * returns here (1) after the image has been created or the
 	 * image creation has failed and (2) after a successful restore.
 	 */

- Resume_devices:
 	/* We may need to release the preallocated image pages here. */
 	if (error || !in_suspend)
 		swsusp_free();
@@ -399,10 +385,6 @@ int hibernation_snapshot(int platform_mode)
 	platform_end(platform_mode);
 	return error;

- Recover_platform:
-	platform_recover(platform_mode);
-	goto Resume_devices;
-
 Cleanup:
 	swsusp_free();
 	goto Close;
@@ -590,9 +572,6 @@ int hibernation_platform_enter(void)
 static void power_down(void)
 {
 	switch (hibernation_mode) {
-	case HIBERNATION_TEST:
-	case HIBERNATION_TESTPROC:
-		break;
 	case HIBERNATION_REBOOT:
 		kernel_restart(NULL);
 		break;
@@ -611,17 +590,6 @@ static void power_down(void)
 	while(1);
 }

-static int prepare_processes(void)
-{
-	int error = 0;
-
-	if (freeze_processes()) {
-		error = -EBUSY;
-		thaw_processes();
-	}
-	return error;
-}
-
 /**
 * hibernate - Carry out system hibernation, including saving the image.
 */
@@ -629,7 +597,7 @@ int hibernate(void)
 {
 	int error;

-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	/* The snapshot device should not be opened while we're running */
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
@@ -654,7 +622,7 @@ int hibernate(void)
 	sys_sync();
 	printk("done.\n");

-	error = prepare_processes();
+	error = freeze_processes();
 	if (error)
 		goto Finish;

@@ -697,7 +665,7 @@ int hibernate(void)
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
 Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	return error;
 }

@@ -811,11 +779,13 @@ static int software_resume(void)
 		goto close_finish;

 	error = create_basic_memory_bitmaps();
-	if (error)
+	if (error) {
+		usermodehelper_enable();
 		goto close_finish;
+	}

 	pr_debug("PM: Preparing processes for restore.\n");
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error) {
 		swsusp_close(FMODE_READ);
 		goto Done;
@@ -855,8 +825,6 @@ static const char * const hibernation_modes[] = {
 	[HIBERNATION_PLATFORM]	= "platform",
 	[HIBERNATION_SHUTDOWN]	= "shutdown",
 	[HIBERNATION_REBOOT]	= "reboot",
-	[HIBERNATION_TEST]	= "test",
-	[HIBERNATION_TESTPROC]	= "testproc",
 };

 /*
@@ -865,17 +833,15 @@ static const char * const hibernation_modes[] = {
 * Hibernation can be handled in several ways. There are a few different ways
 * to put the system into the sleep state: using the platform driver (e.g. ACPI
 * or other hibernation_ops), powering it off or rebooting it (for testing
- * mostly), or using one of the two available test modes.
+ * mostly).
 *
 * The sysfs file /sys/power/disk provides an interface for selecting the
 * hibernation mode to use. Reading from this file causes the available modes
- * to be printed. There are 5 modes that can be supported:
+ * to be printed. There are 3 modes that can be supported:
 *
 * 'platform'
 * 'shutdown'
 * 'reboot'
- * 'test'
- * 'testproc'
 *
 * If a platform hibernation driver is in use, 'platform' will be supported
 * and will be used by default. Otherwise, 'shutdown' will be used by default.
@@ -899,8 +865,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
 	switch (i) {
 	case HIBERNATION_SHUTDOWN:
 	case HIBERNATION_REBOOT:
-	case HIBERNATION_TEST:
-	case HIBERNATION_TESTPROC:
 		break;
 	case HIBERNATION_PLATFORM:
 		if (hibernation_ops)
@@ -929,7 +893,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;

-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
 		if (len == strlen(hibernation_modes[i])
 		    && !strncmp(buf, hibernation_modes[i], len)) {
@@ -941,8 +905,6 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	switch (mode) {
 	case HIBERNATION_SHUTDOWN:
 	case HIBERNATION_REBOOT:
-	case HIBERNATION_TEST:
-	case HIBERNATION_TESTPROC:
 		hibernation_mode = mode;
 		break;
 	case HIBERNATION_PLATFORM:
@@ -957,7 +919,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	if (!error)
 		pr_debug("PM: Hibernation mode set to '%s'\n",
 			 hibernation_modes[mode]);
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	return error ? error : n;
 }

@@ -984,9 +946,9 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 	if (maj != MAJOR(res) || min != MINOR(res))
 		goto out;

-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	swsusp_resume_device = res;
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	printk(KERN_INFO "PM: Starting manual resume from disk\n");
 	noresume = 0;
 	software_resume();
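Note: prepare_processes() disappears because the series changes the error convention: a failed freeze_processes() now leaves the system fully thawed (see the kernel/power/process.c hunks below), so every caller collapses to the same pattern. An illustrative caller sketch, not code from this commit:

static int example_begin_sleep_transition(void)
{
	int error;

	error = freeze_processes();
	if (error)
		return error;	/* freeze_processes() already thawed everything */

	/* ... suspend devices, snapshot memory, etc. ... */

	thaw_processes();
	return 0;
}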
@@ -116,7 +116,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;

-	mutex_lock(&pm_mutex);
+	lock_system_sleep();

 	level = TEST_FIRST;
 	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -126,7 +126,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 			break;
 		}

-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();

 	return error ? error : n;
 }
@@ -282,7 +282,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
 	/* First, check if we are requested to hibernate */
 	if (len == 4 && !strncmp(buf, "disk", len)) {
 		error = hibernate();
 		goto Exit;
 	}

 #ifdef CONFIG_SUSPEND
@@ -50,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
 #define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)

 /* kernel/power/hibernate.c */
+extern bool freezer_test_done;
+
 extern int hibernation_snapshot(int platform_mode);
 extern int hibernation_restore(int platform_mode);
 extern int hibernation_platform_enter(void);
@@ -22,16 +22,7 @@
 */
 #define TIMEOUT	(20 * HZ)

-static inline int freezable(struct task_struct * p)
-{
-	if ((p == current) ||
-	    (p->flags & PF_NOFREEZE) ||
-	    (p->exit_state != 0))
-		return 0;
-	return 1;
-}
-
-static int try_to_freeze_tasks(bool sig_only)
+static int try_to_freeze_tasks(bool user_only)
 {
 	struct task_struct *g, *p;
 	unsigned long end_time;
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)

 	end_time = jiffies + TIMEOUT;

-	if (!sig_only)
+	if (!user_only)
 		freeze_workqueues_begin();

 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezable(p))
-				continue;
-
-			if (!freeze_task(p, sig_only))
+			if (p == current || !freeze_task(p))
 				continue;

 			/*
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);

-		if (!sig_only) {
+		if (!user_only) {
 			wq_busy = freeze_workqueues_busy();
 			todo += wq_busy;
 		}
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
 	elapsed_csecs = elapsed_csecs64;

 	if (todo) {
-		/* This does not unfreeze processes that are already frozen
-		 * (we have slightly ugly calling convention in that respect,
-		 * and caller must call thaw_processes() if something fails),
-		 * but it cleans up leftover PF_FREEZE requests.
-		 */
 		printk("\n");
 		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
 		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
 		       elapsed_csecs / 100, elapsed_csecs % 100,
 		       todo - wq_busy, wq_busy);

-		thaw_workqueues();
-
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			task_lock(p);
-			if (!wakeup && freezing(p) && !freezer_should_skip(p))
+			if (!wakeup && !freezer_should_skip(p) &&
+			    p != current && freezing(p) && !frozen(p))
 				sched_show_task(p);
-			cancel_freezing(p);
-			task_unlock(p);
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 	} else {
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only)

 /**
 * freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
 */
 int freeze_processes(void)
 {
 	int error;

+	if (!pm_freezing)
+		atomic_inc(&system_freezing_cnt);
+
 	printk("Freezing user space processes ... ");
+	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
 	if (!error) {
 		printk("done.");
@@ -150,17 +135,22 @@ int freeze_processes(void)
 	printk("\n");
 	BUG_ON(in_atomic());

+	if (error)
+		thaw_processes();
 	return error;
 }

 /**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
 */
 int freeze_kernel_threads(void)
 {
 	int error;

 	printk("Freezing remaining freezable tasks ... ");
+	pm_nosig_freezing = true;
 	error = try_to_freeze_tasks(false);
 	if (!error)
 		printk("done.");
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void)
 	printk("\n");
 	BUG_ON(in_atomic());

+	if (error)
+		thaw_processes();
 	return error;
 }

-static void thaw_tasks(bool nosig_only)
-{
-	struct task_struct *g, *p;
-
-	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
-		if (!freezable(p))
-			continue;
-
-		if (nosig_only && should_send_signal(p))
-			continue;
-
-		if (cgroup_freezing_or_frozen(p))
-			continue;
-
-		thaw_process(p);
-	} while_each_thread(g, p);
-	read_unlock(&tasklist_lock);
-}
-
 void thaw_processes(void)
 {
+	struct task_struct *g, *p;
+
+	if (pm_freezing)
+		atomic_dec(&system_freezing_cnt);
+	pm_freezing = false;
+	pm_nosig_freezing = false;
+
 	oom_killer_enable();

 	printk("Restarting tasks ... ");

 	thaw_workqueues();
-	thaw_tasks(true);
-	thaw_tasks(false);
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		__thaw_task(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);

 	schedule();
 	printk("done.\n");
 }
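Note: the pm_freezing/pm_nosig_freezing flags and the system_freezing_cnt counter manipulated above exist so that the freezing() test can stay cheap on the hot path. A simplified sketch of the shape this takes in include/linux/freezer.h after the series; treat the details as an assumption to verify against the tree.

extern atomic_t system_freezing_cnt;	/* nr of freezing conditions in effect */
extern bool freezing_slow_path(struct task_struct *p);

/* Fast path: a single atomic read when nobody is freezing the system. */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	return freezing_slow_path(p);
}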
@@ -42,9 +42,9 @@ static const struct platform_suspend_ops *suspend_ops;
 */
 void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	suspend_ops = ops;
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 }
 EXPORT_SYMBOL_GPL(suspend_set_ops);

@@ -106,13 +106,11 @@ static int suspend_prepare(void)
 		goto Finish;

 	error = suspend_freeze_processes();
-	if (error) {
-		suspend_stats.failed_freeze++;
-		dpm_save_failed_step(SUSPEND_FREEZE);
-	} else
+	if (!error)
 		return 0;

-	suspend_thaw_processes();
+	suspend_stats.failed_freeze++;
+	dpm_save_failed_step(SUSPEND_FREEZE);
 	usermodehelper_enable();
 Finish:
 	pm_notifier_call_chain(PM_POST_SUSPEND);
@@ -30,28 +30,6 @@

 #include "power.h"

-/*
- * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
- * will be removed in the future.  They are only preserved here for
- * compatibility with existing userland utilities.
- */
-#define SNAPSHOT_SET_SWAP_FILE	_IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
-#define SNAPSHOT_PMOPS	_IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
-
-#define PMOPS_PREPARE	1
-#define PMOPS_ENTER	2
-#define PMOPS_FINISH	3
-
-/*
- * NOTE: The following ioctl definitions are wrong and have been replaced with
- * correct ones. They are only preserved here for compatibility with existing
- * userland utilities and will be removed in the future.
- */
-#define SNAPSHOT_ATOMIC_SNAPSHOT	_IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
-#define SNAPSHOT_SET_IMAGE_SIZE	_IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
-#define SNAPSHOT_AVAIL_SWAP	_IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
-#define SNAPSHOT_GET_SWAP_PAGE	_IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
-
 #define SNAPSHOT_MINOR	231

@@ -71,7 +49,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 	struct snapshot_data *data;
 	int error;

-	mutex_lock(&pm_mutex);
+	lock_system_sleep();

 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
@@ -123,7 +101,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 	data->platform_support = 0;

 Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();

 	return error;
 }
@@ -132,7 +110,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 {
 	struct snapshot_data *data;

-	mutex_lock(&pm_mutex);
+	lock_system_sleep();

 	swsusp_free();
 	free_basic_memory_bitmaps();
@@ -146,7 +124,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 			PM_POST_HIBERNATION : PM_POST_RESTORE);
 	atomic_inc(&snapshot_device_available);

-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();

 	return 0;
 }
@@ -158,7 +136,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
 	ssize_t res;
 	loff_t pg_offp = *offp & ~PAGE_MASK;

-	mutex_lock(&pm_mutex);
+	lock_system_sleep();

 	data = filp->private_data;
 	if (!data->ready) {
@@ -179,7 +157,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
 	*offp += res;

 Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();

 	return res;
 }
@@ -191,7 +169,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
 	ssize_t res;
 	loff_t pg_offp = *offp & ~PAGE_MASK;

-	mutex_lock(&pm_mutex);
+	lock_system_sleep();

 	data = filp->private_data;

@@ -208,20 +186,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
 	if (res > 0)
 		*offp += res;
 unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();

 	return res;
 }

-static void snapshot_deprecated_ioctl(unsigned int cmd)
-{
-	if (printk_ratelimit())
-		printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will "
-				"be removed soon, update your suspend-to-disk "
-				"utilities\n",
-				__builtin_return_address(0), cmd);
-}
-
 static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			   unsigned long arg)
 {
@@ -257,11 +226,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			break;

 		error = freeze_processes();
-		if (error) {
-			thaw_processes();
+		if (error)
 			usermodehelper_enable();
-		}
-		if (!error)
+		else
 			data->frozen = 1;
 		break;

@@ -274,8 +241,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		data->frozen = 0;
 		break;

-	case SNAPSHOT_ATOMIC_SNAPSHOT:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_CREATE_IMAGE:
 		if (data->mode != O_RDONLY || !data->frozen || data->ready) {
 			error = -EPERM;
@@ -283,10 +248,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		}
 		pm_restore_gfp_mask();
 		error = hibernation_snapshot(data->platform_support);
-		if (!error)
+		if (!error) {
 			error = put_user(in_suspend, (int __user *)arg);
-			if (!error)
+			if (!error && !freezer_test_done)
 				data->ready = 1;
+			if (freezer_test_done) {
+				freezer_test_done = false;
+				thaw_processes();
+			}
+		}
 		break;

 	case SNAPSHOT_ATOMIC_RESTORE:
@@ -305,8 +275,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		data->ready = 0;
 		break;

-	case SNAPSHOT_SET_IMAGE_SIZE:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_PREF_IMAGE_SIZE:
 		image_size = arg;
 		break;
@@ -321,16 +289,12 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		error = put_user(size, (loff_t __user *)arg);
 		break;

-	case SNAPSHOT_AVAIL_SWAP:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_AVAIL_SWAP_SIZE:
 		size = count_swap_pages(data->swap, 1);
 		size <<= PAGE_SHIFT;
 		error = put_user(size, (loff_t __user *)arg);
 		break;

-	case SNAPSHOT_GET_SWAP_PAGE:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_ALLOC_SWAP_PAGE:
 		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
 			error = -ENODEV;
@@ -353,27 +317,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		free_all_swap_pages(data->swap);
 		break;

-	case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
-		snapshot_deprecated_ioctl(cmd);
-		if (!swsusp_swap_in_use()) {
-			/*
-			 * User space encodes device types as two-byte values,
-			 * so we need to recode them
-			 */
-			if (old_decode_dev(arg)) {
-				data->swap = swap_type_of(old_decode_dev(arg),
-							  0, NULL);
-				if (data->swap < 0)
-					error = -ENODEV;
-			} else {
-				data->swap = -1;
-				error = -EINVAL;
-			}
-		} else {
-			error = -EPERM;
-		}
-		break;
-
 	case SNAPSHOT_S2RAM:
 		if (!data->frozen) {
 			error = -EPERM;
@@ -396,33 +339,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		error = hibernation_platform_enter();
 		break;

-	case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
-		snapshot_deprecated_ioctl(cmd);
-		error = -EINVAL;
-
-		switch (arg) {
-
-		case PMOPS_PREPARE:
-			data->platform_support = 1;
-			error = 0;
-			break;
-
-		case PMOPS_ENTER:
-			if (data->platform_support)
-				error = hibernation_platform_enter();
-			break;
-
-		case PMOPS_FINISH:
-			if (data->platform_support)
-				error = 0;
-			break;
-
-		default:
-			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
-
-		}
-		break;
-
 	case SNAPSHOT_SET_SWAP_AREA:
 		if (swsusp_swap_in_use()) {
 			error = -EPERM;
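Note: with the deprecated ioctls removed, a suspend-to-disk utility is expected to use only the current names from <linux/suspend_ioctls.h>. A trimmed userspace sketch of the basic sequence; error reporting is omitted and the device path is the conventional one, so treat the details as illustrative.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
	int in_suspend = 0;
	int fd = open("/dev/snapshot", O_RDONLY);

	if (fd < 0 || ioctl(fd, SNAPSHOT_FREEZE, 0))
		return 1;
	if (ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend) == 0 && in_suspend) {
		/* in_suspend != 0: image created; read it from fd and save it */
	}
	ioctl(fd, SNAPSHOT_UNFREEZE, 0);
	close(fd);
	return 0;
}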
@@ -600,14 +600,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)

 	/*
 	 * Finally, kill the kernel thread. We don't need to be RCU
-	 * safe anymore, since the bdi is gone from visibility. Force
-	 * unfreeze of the thread before calling kthread_stop(), otherwise
-	 * it would never exet if it is currently stuck in the refrigerator.
+	 * safe anymore, since the bdi is gone from visibility.
 	 */
-	if (bdi->wb.task) {
-		thaw_process(bdi->wb.task);
+	if (bdi->wb.task)
 		kthread_stop(bdi->wb.task);
-	}
 }

 /*
@@ -328,7 +328,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 		 */
 		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
 			if (unlikely(frozen(p)))
-				thaw_process(p);
+				__thaw_task(p);
 			return ERR_PTR(-1UL);
 		}
 		if (!p->mm)
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>

 #include <linux/sunrpc/clnt.h>

@@ -231,7 +232,7 @@ static int rpc_wait_bit_killable(void *word)
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	schedule();
+	freezable_schedule();
 	return 0;
 }

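Note: freezable_schedule() is what lets the TASK_KILLABLE sleep in rpc_wait_bit_killable() stop blocking the freezer, per the "Freezer / sunrpc / NFS" patch in this merge. A simplified sketch of its definition from include/linux/freezer.h in this series:

/* Like schedule(), but the freezer does not count us as a blocker. */
#define freezable_schedule()						\
({									\
	freezer_do_not_count();						\
	schedule();							\
	freezer_count();						\
})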