Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
Pull ACPI & Power Management changes from Len Brown:
 - ACPI 5.0 after-ripples, ACPICA/Linux divergence cleanup
 - cpuidle evolving, more ARM use
 - thermal sub-system evolving, ditto
 - assorted other PM bits

Fix up conflicts in various cpuidle implementations due to ARM cpuidle
cleanups (ARM at91 self-refresh and cpu idle code rewritten into "standby"
in asm conflicting with the consolidation of cpuidle time keeping), trivial
SH include file context conflict and RCU tracing fixes in generic code.

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux: (77 commits)
  ACPI throttling: fix endian bug in acpi_read_throttling_status()
  Disable MCP limit exceeded messages from Intel IPS driver
  ACPI video: Don't start video device until its associated input device has been allocated
  ACPI video: Harden video bus adding.
  ACPI: Add support for exposing BGRT data
  ACPI: export acpi_kobj
  ACPI: Fix logic for removing mappings in 'acpi_unmap'
  CPER failed to handle generic error records with multiple sections
  ACPI: Clean redundant codes in scan.c
  ACPI: Fix unprotected smp_processor_id() in acpi_processor_cst_has_changed()
  ACPI: consistently use should_use_kmap()
  PNPACPI: Fix device ref leaking in acpi_pnp_match
  ACPI: Fix use-after-free in acpi_map_lsapic
  ACPI: processor_driver: add missing kfree
  ACPI, APEI: Fix incorrect APEI register bit width check and usage
  Update documentation for parameter *notrigger* in einj.txt
  ACPI, APEI, EINJ, new parameter to control trigger action
  ACPI, APEI, EINJ, limit the range of einj_param
  ACPI, APEI, Fix ERST header length check
  cpuidle: power_usage should be declared signed integer
  ...
Commit a335750b9a
@ -1,3 +1,23 @@
What:		/sys/firmware/acpi/bgrt/
Date:		January 2012
Contact:	Matthew Garrett <mjg@redhat.com>
Description:
	The BGRT is an ACPI 5.0 feature that allows the OS
	to obtain a copy of the firmware boot splash and
	some associated metadata. This is intended to be used
	by boot splash applications in order to interact with
	the firmware boot splash in order to avoid jarring
	transitions.

	image: The image bitmap. Currently a 32-bit BMP.
	status: 1 if the image is valid, 0 if firmware invalidated it.
	type: 0 indicates image is in BMP format.
	version: The version of the BGRT. Currently 1.
	xoffset: The number of pixels between the left of the screen
		and the left edge of the image.
	yoffset: The number of pixels between the top of the screen
		and the top edge of the image.

What:		/sys/firmware/acpi/interrupts/
Date:		February 2008
Contact:	Len Brown <lenb@kernel.org>
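The BGRT attributes documented above are ordinary sysfs files, so a boot
splash application needs no special API to consume them. The following is a
minimal illustrative sketch only, not taken from this commit; the paths come
from the ABI text above and the output file name is arbitrary.

/* Illustrative sketch: dump the firmware boot splash exposed via BGRT. */
#include <stdio.h>

int main(void)
{
	FILE *in, *out;
	char buf[4096];
	size_t n;
	int status = 0;

	/* "status" is 1 while the image is still valid (see ABI text above). */
	in = fopen("/sys/firmware/acpi/bgrt/status", "r");
	if (!in || fscanf(in, "%d", &status) != 1 || status != 1) {
		fprintf(stderr, "no valid BGRT image\n");
		return 1;
	}
	fclose(in);

	/* "image" is the raw bitmap, currently a 32-bit BMP per the text above. */
	in = fopen("/sys/firmware/acpi/bgrt/image", "rb");
	out = fopen("bootsplash.bmp", "wb");
	if (!in || !out)
		return 1;
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
		fwrite(buf, 1, n, out);
	fclose(out);
	fclose(in);
	return 0;
}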
@ -53,6 +53,14 @@ directory apei/einj. The following files are provided.
This file is used to set the second error parameter value. Effect of
parameter depends on error_type specified.

- notrigger
The EINJ mechanism is a two step process. First inject the error, then
perform some actions to trigger it. Setting "notrigger" to 1 skips the
trigger phase, which *may* allow the user to cause the error in some other
context by a simple access to the cpu, memory location, or device that is
the target of the error injection. Whether this actually works depends
on what operations the BIOS actually includes in the trigger phase.

BIOS versions based in the ACPI 4.0 specification have limited options
to control where the errors are injected. Your BIOS may support an
extension (enabled with the param_extension=1 module parameter, or
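To make the two-step flow concrete, here is a small illustrative sketch that
is not part of this commit. It assumes debugfs is mounted at
/sys/kernel/debug, that the file names under apei/einj match this
documentation, and that 0x8 is a valid entry in available_error_type on the
machine (commonly "Memory Correctable"); adjust those values for your system.

/* Illustrative sketch: inject an error but skip the trigger phase. */
#include <stdio.h>

static int write_val(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	/* Assumed debugfs mount point; the einj directory is described above. */
	snprintf(path, sizeof(path), "/sys/kernel/debug/apei/einj/%s", file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* 0x8 is assumed to be listed in available_error_type on this machine. */
	if (write_val("error_type", "0x8"))
		return 1;
	write_val("notrigger", "1");	/* skip the trigger phase */
	return write_val("error_inject", "1") ? 1 : 0;
}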
@ -36,6 +36,7 @@ drwxr-xr-x 2 root root 0 Feb 8 10:42 state3
/sys/devices/system/cpu/cpu0/cpuidle/state0:
total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@ -45,6 +46,7 @@ total 0
/sys/devices/system/cpu/cpu0/cpuidle/state1:
total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@ -54,6 +56,7 @@ total 0
/sys/devices/system/cpu/cpu0/cpuidle/state2:
total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@ -63,6 +66,7 @@ total 0
/sys/devices/system/cpu/cpu0/cpuidle/state3:
total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@ -72,6 +76,7 @@ total 0

* desc : Small description about the idle state (string)
* disable : Option to disable this idle state (bool)
* latency : Latency to exit out of this idle state (in microseconds)
* name : Name of the idle state (string)
* power : Power consumed while in this idle state (in milliwatts)
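The new "disable" attribute listed above is the only writable file in each
state directory. Purely as an illustration, not from this commit and needing
root, the sketch below prints state1's description on cpu0 and then disables
that state so the governor will no longer pick it; the paths follow the
listing above.

/* Illustrative sketch: read "desc" and set the new "disable" attribute. */
#include <stdio.h>

#define STATE_DIR "/sys/devices/system/cpu/cpu0/cpuidle/state1/"

int main(void)
{
	char desc[128];
	FILE *f;

	f = fopen(STATE_DIR "desc", "r");
	if (f) {
		if (fgets(desc, sizeof(desc), f))
			printf("state1: %s", desc);
		fclose(f);
	}

	/* Writing 1 prevents the governor from choosing this state. */
	f = fopen(STATE_DIR "disable", "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}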
@ -0,0 +1,29 @@
#ifndef __ASM_ARM_CPUIDLE_H
#define __ASM_ARM_CPUIDLE_H

#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index);
#else
static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index) { return -ENODEV; }
#endif

/* Common ARM WFI state */
#define ARM_CPUIDLE_WFI_STATE_PWR(p) {\
	.enter = arm_cpuidle_simple_enter,\
	.exit_latency = 1,\
	.target_residency = 1,\
	.power_usage = p,\
	.flags = CPUIDLE_FLAG_TIME_VALID,\
	.name = "WFI",\
	.desc = "ARM WFI",\
}

/*
 * in case power_specified == 1, give a default WFI power value needed
 * by some governors
 */
#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX)

#endif
@ -23,7 +23,7 @@ obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o

obj-$(CONFIG_LEDS) += leds.o
obj-$(CONFIG_OC_ETM) += etm.o

obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
obj-$(CONFIG_MODULES) += armksyms.o module.o
@ -0,0 +1,21 @@
/*
 * Copyright 2012 Linaro Ltd.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/cpuidle.h>
#include <asm/proc-fns.h>

int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();

	return index;
}
@ -17,9 +17,10 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/cpuidle.h>
#include <asm/proc-fns.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/proc-fns.h>
#include <asm/cpuidle.h>

#include "pm.h"

@ -27,61 +28,39 @@
|
|||
|
||||
static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device);
|
||||
|
||||
static struct cpuidle_driver at91_idle_driver = {
|
||||
.name = "at91_idle",
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/* Actual code that puts the SoC in different idle states */
|
||||
static int at91_enter_idle(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv,
|
||||
int index)
|
||||
{
|
||||
struct timeval before, after;
|
||||
int idle_time;
|
||||
at91_standby();
|
||||
|
||||
local_irq_disable();
|
||||
do_gettimeofday(&before);
|
||||
if (index == 0)
|
||||
/* Wait for interrupt state */
|
||||
cpu_do_idle();
|
||||
else if (index == 1)
|
||||
at91_standby();
|
||||
|
||||
do_gettimeofday(&after);
|
||||
local_irq_enable();
|
||||
idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
|
||||
(after.tv_usec - before.tv_usec);
|
||||
|
||||
dev->last_residency = idle_time;
|
||||
return index;
|
||||
}
|
||||
|
||||
static struct cpuidle_driver at91_idle_driver = {
|
||||
.name = "at91_idle",
|
||||
.owner = THIS_MODULE,
|
||||
.en_core_tk_irqen = 1,
|
||||
.states[0] = ARM_CPUIDLE_WFI_STATE,
|
||||
.states[1] = {
|
||||
.enter = at91_enter_idle,
|
||||
.exit_latency = 10,
|
||||
.target_residency = 100000,
|
||||
.flags = CPUIDLE_FLAG_TIME_VALID,
|
||||
.name = "RAM_SR",
|
||||
.desc = "WFI and DDR Self Refresh",
|
||||
},
|
||||
.state_count = AT91_MAX_STATES,
|
||||
};
|
||||
|
||||
/* Initialize CPU idle by registering the idle states */
|
||||
static int at91_init_cpuidle(void)
|
||||
{
|
||||
struct cpuidle_device *device;
|
||||
struct cpuidle_driver *driver = &at91_idle_driver;
|
||||
|
||||
device = &per_cpu(at91_cpuidle_device, smp_processor_id());
|
||||
device->state_count = AT91_MAX_STATES;
|
||||
driver->state_count = AT91_MAX_STATES;
|
||||
|
||||
/* Wait for interrupt state */
|
||||
driver->states[0].enter = at91_enter_idle;
|
||||
driver->states[0].exit_latency = 1;
|
||||
driver->states[0].target_residency = 10000;
|
||||
driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
|
||||
strcpy(driver->states[0].name, "WFI");
|
||||
strcpy(driver->states[0].desc, "Wait for interrupt");
|
||||
|
||||
/* Wait for interrupt and RAM self refresh state */
|
||||
driver->states[1].enter = at91_enter_idle;
|
||||
driver->states[1].exit_latency = 10;
|
||||
driver->states[1].target_residency = 10000;
|
||||
driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
|
||||
strcpy(driver->states[1].name, "RAM_SR");
|
||||
strcpy(driver->states[1].desc, "WFI and RAM Self Refresh");
|
||||
|
||||
cpuidle_register_driver(&at91_idle_driver);
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <linux/io.h>
|
||||
#include <linux/export.h>
|
||||
#include <asm/proc-fns.h>
|
||||
#include <asm/cpuidle.h>
|
||||
|
||||
#include <mach/cpuidle.h>
|
||||
#include <mach/ddr2.h>
|
||||
|
@ -30,12 +31,43 @@ struct davinci_ops {
|
|||
u32 flags;
|
||||
};
|
||||
|
||||
/* Actual code that puts the SoC in different idle states */
|
||||
static int davinci_enter_idle(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv,
|
||||
int index)
|
||||
{
|
||||
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
|
||||
struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
|
||||
|
||||
if (ops && ops->enter)
|
||||
ops->enter(ops->flags);
|
||||
|
||||
index = cpuidle_wrap_enter(dev, drv, index,
|
||||
arm_cpuidle_simple_enter);
|
||||
|
||||
if (ops && ops->exit)
|
||||
ops->exit(ops->flags);
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
/* fields in davinci_ops.flags */
|
||||
#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)
|
||||
|
||||
static struct cpuidle_driver davinci_idle_driver = {
|
||||
.name = "cpuidle-davinci",
|
||||
.owner = THIS_MODULE,
|
||||
.name = "cpuidle-davinci",
|
||||
.owner = THIS_MODULE,
|
||||
.en_core_tk_irqen = 1,
|
||||
.states[0] = ARM_CPUIDLE_WFI_STATE,
|
||||
.states[1] = {
|
||||
.enter = davinci_enter_idle,
|
||||
.exit_latency = 10,
|
||||
.target_residency = 100000,
|
||||
.flags = CPUIDLE_FLAG_TIME_VALID,
|
||||
.name = "DDR SR",
|
||||
.desc = "WFI and DDR Self Refresh",
|
||||
},
|
||||
.state_count = DAVINCI_CPUIDLE_MAX_STATES,
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
|
||||
|
@ -77,41 +109,10 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
|
|||
},
|
||||
};
|
||||
|
||||
/* Actual code that puts the SoC in different idle states */
|
||||
static int davinci_enter_idle(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv,
|
||||
int index)
|
||||
{
|
||||
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
|
||||
struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
|
||||
struct timeval before, after;
|
||||
int idle_time;
|
||||
|
||||
local_irq_disable();
|
||||
do_gettimeofday(&before);
|
||||
|
||||
if (ops && ops->enter)
|
||||
ops->enter(ops->flags);
|
||||
/* Wait for interrupt state */
|
||||
cpu_do_idle();
|
||||
if (ops && ops->exit)
|
||||
ops->exit(ops->flags);
|
||||
|
||||
do_gettimeofday(&after);
|
||||
local_irq_enable();
|
||||
idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
|
||||
(after.tv_usec - before.tv_usec);
|
||||
|
||||
dev->last_residency = idle_time;
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
static int __init davinci_cpuidle_probe(struct platform_device *pdev)
|
||||
{
|
||||
int ret;
|
||||
struct cpuidle_device *device;
|
||||
struct cpuidle_driver *driver = &davinci_idle_driver;
|
||||
struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
|
||||
|
||||
device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
|
||||
|
@ -123,27 +124,11 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
|
|||
|
||||
ddr2_reg_base = pdata->ddr2_ctlr_base;
|
||||
|
||||
/* Wait for interrupt state */
|
||||
driver->states[0].enter = davinci_enter_idle;
|
||||
driver->states[0].exit_latency = 1;
|
||||
driver->states[0].target_residency = 10000;
|
||||
driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
|
||||
strcpy(driver->states[0].name, "WFI");
|
||||
strcpy(driver->states[0].desc, "Wait for interrupt");
|
||||
|
||||
/* Wait for interrupt and DDR self refresh state */
|
||||
driver->states[1].enter = davinci_enter_idle;
|
||||
driver->states[1].exit_latency = 10;
|
||||
driver->states[1].target_residency = 10000;
|
||||
driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
|
||||
strcpy(driver->states[1].name, "DDR SR");
|
||||
strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
|
||||
if (pdata->ddr2_pdown)
|
||||
davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
|
||||
cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
|
||||
|
||||
device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
|
||||
driver->state_count = DAVINCI_CPUIDLE_MAX_STATES;
|
||||
|
||||
ret = cpuidle_register_driver(&davinci_idle_driver);
|
||||
if (ret) {
|
||||
|
|
|
@ -20,77 +20,47 @@
|
|||
#include <linux/io.h>
|
||||
#include <linux/export.h>
|
||||
#include <asm/proc-fns.h>
|
||||
#include <asm/cpuidle.h>
|
||||
#include <mach/kirkwood.h>
|
||||
|
||||
#define KIRKWOOD_MAX_STATES 2
|
||||
|
||||
static struct cpuidle_driver kirkwood_idle_driver = {
|
||||
.name = "kirkwood_idle",
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
|
||||
|
||||
/* Actual code that puts the SoC in different idle states */
|
||||
static int kirkwood_enter_idle(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv,
|
||||
int index)
|
||||
{
|
||||
struct timeval before, after;
|
||||
int idle_time;
|
||||
|
||||
local_irq_disable();
|
||||
do_gettimeofday(&before);
|
||||
if (index == 0)
|
||||
/* Wait for interrupt state */
|
||||
cpu_do_idle();
|
||||
else if (index == 1) {
|
||||
/*
|
||||
* Following write will put DDR in self refresh.
|
||||
* Note that we have 256 cycles before DDR puts it
|
||||
* self in self-refresh, so the wait-for-interrupt
|
||||
* call afterwards won't get the DDR from self refresh
|
||||
* mode.
|
||||
*/
|
||||
writel(0x7, DDR_OPERATION_BASE);
|
||||
cpu_do_idle();
|
||||
}
|
||||
do_gettimeofday(&after);
|
||||
local_irq_enable();
|
||||
idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
|
||||
(after.tv_usec - before.tv_usec);
|
||||
|
||||
/* Update last residency */
|
||||
dev->last_residency = idle_time;
|
||||
writel(0x7, DDR_OPERATION_BASE);
|
||||
cpu_do_idle();
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
static struct cpuidle_driver kirkwood_idle_driver = {
|
||||
.name = "kirkwood_idle",
|
||||
.owner = THIS_MODULE,
|
||||
.en_core_tk_irqen = 1,
|
||||
.states[0] = ARM_CPUIDLE_WFI_STATE,
|
||||
.states[1] = {
|
||||
.enter = kirkwood_enter_idle,
|
||||
.exit_latency = 10,
|
||||
.target_residency = 100000,
|
||||
.flags = CPUIDLE_FLAG_TIME_VALID,
|
||||
.name = "DDR SR",
|
||||
.desc = "WFI and DDR Self Refresh",
|
||||
},
|
||||
.state_count = KIRKWOOD_MAX_STATES,
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
|
||||
|
||||
/* Initialize CPU idle by registering the idle states */
|
||||
static int kirkwood_init_cpuidle(void)
|
||||
{
|
||||
struct cpuidle_device *device;
|
||||
struct cpuidle_driver *driver = &kirkwood_idle_driver;
|
||||
|
||||
device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
|
||||
device->state_count = KIRKWOOD_MAX_STATES;
|
||||
driver->state_count = KIRKWOOD_MAX_STATES;
|
||||
|
||||
/* Wait for interrupt state */
|
||||
driver->states[0].enter = kirkwood_enter_idle;
|
||||
driver->states[0].exit_latency = 1;
|
||||
driver->states[0].target_residency = 10000;
|
||||
driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
|
||||
strcpy(driver->states[0].name, "WFI");
|
||||
strcpy(driver->states[0].desc, "Wait for interrupt");
|
||||
|
||||
/* Wait for interrupt and DDR self refresh state */
|
||||
driver->states[1].enter = kirkwood_enter_idle;
|
||||
driver->states[1].exit_latency = 10;
|
||||
driver->states[1].target_residency = 10000;
|
||||
driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
|
||||
strcpy(driver->states[1].name, "DDR SR");
|
||||
strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
|
||||
|
||||
cpuidle_register_driver(&kirkwood_idle_driver);
|
||||
if (cpuidle_register_device(device)) {
|
||||
|
|
|
@ -87,29 +87,14 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* omap3_enter_idle - Programs OMAP3 to enter the specified state
|
||||
* @dev: cpuidle device
|
||||
* @drv: cpuidle driver
|
||||
* @index: the index of state to be entered
|
||||
*
|
||||
* Called from the CPUidle framework to program the device to the
|
||||
* specified target state selected by the governor.
|
||||
*/
|
||||
static int omap3_enter_idle(struct cpuidle_device *dev,
|
||||
static int __omap3_enter_idle(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv,
|
||||
int index)
|
||||
{
|
||||
struct omap3_idle_statedata *cx =
|
||||
cpuidle_get_statedata(&dev->states_usage[index]);
|
||||
struct timespec ts_preidle, ts_postidle, ts_idle;
|
||||
u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
|
||||
int idle_time;
|
||||
|
||||
/* Used to keep track of the total time in idle */
|
||||
getnstimeofday(&ts_preidle);
|
||||
|
||||
local_irq_disable();
|
||||
local_fiq_disable();
|
||||
|
||||
pwrdm_set_next_pwrst(mpu_pd, mpu_state);
|
||||
|
@ -148,21 +133,28 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
|
|||
}
|
||||
|
||||
return_sleep_time:
|
||||
getnstimeofday(&ts_postidle);
|
||||
ts_idle = timespec_sub(ts_postidle, ts_preidle);
|
||||
|
||||
local_irq_enable();
|
||||
local_fiq_enable();
|
||||
|
||||
idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
|
||||
USEC_PER_SEC;
|
||||
|
||||
/* Update cpuidle counters */
|
||||
dev->last_residency = idle_time;
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
/**
|
||||
* omap3_enter_idle - Programs OMAP3 to enter the specified state
|
||||
* @dev: cpuidle device
|
||||
* @drv: cpuidle driver
|
||||
* @index: the index of state to be entered
|
||||
*
|
||||
* Called from the CPUidle framework to program the device to the
|
||||
* specified target state selected by the governor.
|
||||
*/
|
||||
static inline int omap3_enter_idle(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv,
|
||||
int index)
|
||||
{
|
||||
return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle);
|
||||
}
|
||||
|
||||
/**
|
||||
* next_valid_state - Find next valid C-state
|
||||
* @dev: cpuidle device
|
||||
|
|
|
@ -62,15 +62,9 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
|
|||
{
|
||||
struct omap4_idle_statedata *cx =
|
||||
cpuidle_get_statedata(&dev->states_usage[index]);
|
||||
struct timespec ts_preidle, ts_postidle, ts_idle;
|
||||
u32 cpu1_state;
|
||||
int idle_time;
|
||||
int cpu_id = smp_processor_id();
|
||||
|
||||
/* Used to keep track of the total time in idle */
|
||||
getnstimeofday(&ts_preidle);
|
||||
|
||||
local_irq_disable();
|
||||
local_fiq_disable();
|
||||
|
||||
/*
|
||||
|
@ -128,26 +122,17 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
|
|||
if (index > 0)
|
||||
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
|
||||
|
||||
getnstimeofday(&ts_postidle);
|
||||
ts_idle = timespec_sub(ts_postidle, ts_preidle);
|
||||
|
||||
local_irq_enable();
|
||||
local_fiq_enable();
|
||||
|
||||
idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
|
||||
USEC_PER_SEC;
|
||||
|
||||
/* Update cpuidle counters */
|
||||
dev->last_residency = idle_time;
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
|
||||
|
||||
struct cpuidle_driver omap4_idle_driver = {
|
||||
.name = "omap4_idle",
|
||||
.owner = THIS_MODULE,
|
||||
.name = "omap4_idle",
|
||||
.owner = THIS_MODULE,
|
||||
.en_core_tk_irqen = 1,
|
||||
};
|
||||
|
||||
static inline void _fill_cstate(struct cpuidle_driver *drv,
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/suspend.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/err.h>
|
||||
#include <asm/cpuidle.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
static void shmobile_enter_wfi(void)
|
||||
|
@ -28,37 +29,19 @@ static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
|
|||
struct cpuidle_driver *drv,
|
||||
int index)
|
||||
{
|
||||
ktime_t before, after;
|
||||
|
||||
before = ktime_get();
|
||||
|
||||
local_irq_disable();
|
||||
local_fiq_disable();
|
||||
|
||||
shmobile_cpuidle_modes[index]();
|
||||
|
||||
local_irq_enable();
|
||||
local_fiq_enable();
|
||||
|
||||
after = ktime_get();
|
||||
dev->last_residency = ktime_to_ns(ktime_sub(after, before)) >> 10;
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
static struct cpuidle_device shmobile_cpuidle_dev;
|
||||
static struct cpuidle_driver shmobile_cpuidle_driver = {
|
||||
.name = "shmobile_cpuidle",
|
||||
.owner = THIS_MODULE,
|
||||
.states[0] = {
|
||||
.name = "C1",
|
||||
.desc = "WFI",
|
||||
.exit_latency = 1,
|
||||
.target_residency = 1 * 2,
|
||||
.flags = CPUIDLE_FLAG_TIME_VALID,
|
||||
},
|
||||
.safe_state_index = 0, /* C1 */
|
||||
.state_count = 1,
|
||||
.name = "shmobile_cpuidle",
|
||||
.owner = THIS_MODULE,
|
||||
.en_core_tk_irqen = 1,
|
||||
.states[0] = ARM_CPUIDLE_WFI_STATE,
|
||||
.safe_state_index = 0, /* C1 */
|
||||
.state_count = 1,
|
||||
};
|
||||
|
||||
void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
|
||||
|
|
|
@ -29,7 +29,6 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
|
|||
int index)
|
||||
{
|
||||
unsigned long allowed_mode = SUSP_SH_SLEEP;
|
||||
ktime_t before, after;
|
||||
int requested_state = index;
|
||||
int allowed_state;
|
||||
int k;
|
||||
|
@ -47,19 +46,16 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
|
|||
*/
|
||||
k = min_t(int, allowed_state, requested_state);
|
||||
|
||||
before = ktime_get();
|
||||
sh_mobile_call_standby(cpuidle_mode[k]);
|
||||
after = ktime_get();
|
||||
|
||||
dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10;
|
||||
|
||||
return k;
|
||||
}
|
||||
|
||||
static struct cpuidle_device cpuidle_dev;
|
||||
static struct cpuidle_driver cpuidle_driver = {
|
||||
.name = "sh_idle",
|
||||
.owner = THIS_MODULE,
|
||||
.name = "sh_idle",
|
||||
.owner = THIS_MODULE,
|
||||
.en_core_tk_irqen = 1,
|
||||
};
|
||||
|
||||
void sh_mobile_setup_cpuidle(void)
|
||||
|
|
|
@ -642,6 +642,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
|
|||
kfree(buffer.pointer);
|
||||
buffer.length = ACPI_ALLOCATE_BUFFER;
|
||||
buffer.pointer = NULL;
|
||||
lapic = NULL;
|
||||
|
||||
if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
|
||||
goto out;
|
||||
|
@ -650,7 +651,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
|
|||
goto free_tmp_map;
|
||||
|
||||
cpumask_copy(tmp_map, cpu_present_mask);
|
||||
acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
|
||||
acpi_register_lapic(physid, ACPI_MADT_ENABLED);
|
||||
|
||||
/*
|
||||
* If mp_register_lapic successfully generates a new logical cpu
|
||||
|
|
|
@ -50,6 +50,7 @@
|
|||
#include <linux/tboot.h>
|
||||
#include <linux/stackprotector.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/cpuidle.h>
|
||||
|
||||
#include <asm/acpi.h>
|
||||
#include <asm/desc.h>
|
||||
|
@ -1404,7 +1405,8 @@ void native_play_dead(void)
|
|||
tboot_shutdown(TB_SHUTDOWN_WFS);
|
||||
|
||||
mwait_play_dead(); /* Only returns on failure */
|
||||
hlt_play_dead();
|
||||
if (cpuidle_play_dead())
|
||||
hlt_play_dead();
|
||||
}
|
||||
|
||||
#else /* ... !CONFIG_HOTPLUG_CPU */
|
||||
|
|
|
@ -272,7 +272,7 @@ static void tboot_copy_fadt(const struct acpi_table_fadt *fadt)
|
|||
offsetof(struct acpi_table_facs, firmware_waking_vector);
|
||||
}
|
||||
|
||||
void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
|
||||
static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
|
||||
{
|
||||
static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = {
|
||||
/* S0,1,2: */ -1, -1, -1,
|
||||
|
@ -281,7 +281,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
|
|||
/* S5: */ TB_SHUTDOWN_S5 };
|
||||
|
||||
if (!tboot_enabled())
|
||||
return;
|
||||
return 0;
|
||||
|
||||
tboot_copy_fadt(&acpi_gbl_FADT);
|
||||
tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control;
|
||||
|
@ -292,10 +292,11 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
|
|||
if (sleep_state >= ACPI_S_STATE_COUNT ||
|
||||
acpi_shutdown_map[sleep_state] == -1) {
|
||||
pr_warning("unsupported sleep state 0x%x\n", sleep_state);
|
||||
return;
|
||||
return -1;
|
||||
}
|
||||
|
||||
tboot_shutdown(acpi_shutdown_map[sleep_state]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static atomic_t ap_wfs_count;
|
||||
|
@ -345,6 +346,8 @@ static __init int tboot_late_init(void)
|
|||
|
||||
atomic_set(&ap_wfs_count, 0);
|
||||
register_hotcpu_notifier(&tboot_cpu_notifier);
|
||||
|
||||
acpi_os_set_prepare_sleep(&tboot_sleep);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -384,6 +384,15 @@ config ACPI_CUSTOM_METHOD
|
|||
load additional kernel modules after boot, this feature may be used
|
||||
to override that restriction).
|
||||
|
||||
config ACPI_BGRT
|
||||
tristate "Boottime Graphics Resource Table support"
|
||||
default n
|
||||
help
|
||||
This driver adds support for exposing the ACPI Boottime Graphics
|
||||
Resource Table, which allows the operating system to obtain
|
||||
data from the firmware boot splash. It will appear under
|
||||
/sys/firmware/acpi/bgrt/ .
|
||||
|
||||
source "drivers/acpi/apei/Kconfig"
|
||||
|
||||
endif # ACPI
|
||||
|
|
|
@ -62,6 +62,7 @@ obj-$(CONFIG_ACPI_SBS) += sbs.o
|
|||
obj-$(CONFIG_ACPI_HED) += hed.o
|
||||
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
|
||||
obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
|
||||
obj-$(CONFIG_ACPI_BGRT) += bgrt.o
|
||||
|
||||
# processor has its own "processor." module_param namespace
|
||||
processor-y := processor_driver.o processor_throttling.o
|
||||
|
|
|
@ -68,12 +68,14 @@ acpi-y += \
|
|||
|
||||
acpi-y += \
|
||||
hwacpi.o \
|
||||
hwesleep.o \
|
||||
hwgpe.o \
|
||||
hwpci.o \
|
||||
hwregs.o \
|
||||
hwsleep.o \
|
||||
hwvalid.o \
|
||||
hwxface.o
|
||||
hwxface.o \
|
||||
hwxfsleep.o
|
||||
|
||||
acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
|
||||
|
||||
|
|
|
@ -51,7 +51,6 @@
|
|||
*
|
||||
* Note: The order of these include files is important.
|
||||
*/
|
||||
#include "acconfig.h" /* Global configuration constants */
|
||||
#include "acmacros.h" /* C macros */
|
||||
#include "aclocal.h" /* Internal data types */
|
||||
#include "acobject.h" /* ACPI internal object */
|
||||
|
|
|
@ -111,7 +111,7 @@ acpi_status acpi_db_find_name_in_namespace(char *name_arg);
|
|||
|
||||
void acpi_db_set_scope(char *name);
|
||||
|
||||
acpi_status acpi_db_sleep(char *object_arg);
|
||||
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_db_sleep(char *object_arg))
|
||||
|
||||
void acpi_db_find_references(char *object_arg);
|
||||
|
||||
|
@ -119,11 +119,13 @@ void acpi_db_display_locks(void);
|
|||
|
||||
void acpi_db_display_resources(char *object_arg);
|
||||
|
||||
void acpi_db_display_gpes(void);
|
||||
ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void))
|
||||
|
||||
void acpi_db_check_integrity(void);
|
||||
|
||||
void acpi_db_generate_gpe(char *gpe_arg, char *block_arg);
|
||||
ACPI_HW_DEPENDENT_RETURN_VOID(void
|
||||
acpi_db_generate_gpe(char *gpe_arg,
|
||||
char *block_arg))
|
||||
|
||||
void acpi_db_check_predefined_names(void);
|
||||
|
||||
|
|
|
@ -69,11 +69,10 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
|
|||
*/
|
||||
acpi_status acpi_ev_init_global_lock_handler(void);
|
||||
|
||||
acpi_status acpi_ev_acquire_global_lock(u16 timeout);
|
||||
|
||||
acpi_status acpi_ev_release_global_lock(void);
|
||||
|
||||
acpi_status acpi_ev_remove_global_lock_handler(void);
|
||||
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
|
||||
acpi_ev_acquire_global_lock(u16 timeout))
|
||||
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
|
||||
acpi_status acpi_ev_remove_global_lock_handler(void);
|
||||
|
||||
/*
|
||||
* evgpe - Low-level GPE support
|
||||
|
@ -114,7 +113,9 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
|||
struct acpi_gpe_block_info *gpe_block,
|
||||
void *context);
|
||||
|
||||
acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
|
||||
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
|
||||
acpi_ev_delete_gpe_block(struct acpi_gpe_block_info
|
||||
*gpe_block))
|
||||
|
||||
u32
|
||||
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
|
||||
|
@ -126,9 +127,10 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
|
|||
*/
|
||||
acpi_status acpi_ev_gpe_initialize(void);
|
||||
|
||||
void acpi_ev_update_gpes(acpi_owner_id table_owner_id);
|
||||
ACPI_HW_DEPENDENT_RETURN_VOID(void
|
||||
acpi_ev_update_gpes(acpi_owner_id table_owner_id))
|
||||
|
||||
acpi_status
|
||||
acpi_status
|
||||
acpi_ev_match_gpe_method(acpi_handle obj_handle,
|
||||
u32 level, void *context, void **return_value);
|
||||
|
||||
|
@ -237,6 +239,5 @@ acpi_status acpi_ev_remove_sci_handler(void);
|
|||
|
||||
u32 acpi_ev_initialize_sCI(u32 program_sCI);
|
||||
|
||||
void acpi_ev_terminate(void);
|
||||
|
||||
ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
|
||||
#endif /* __ACEVENTS_H__ */
|
||||
|
|
|
@ -147,7 +147,7 @@ u8 acpi_gbl_system_awake_and_running;
|
|||
*/
|
||||
u8 acpi_gbl_reduced_hardware;
|
||||
|
||||
#endif
|
||||
#endif /* DEFINE_ACPI_GLOBALS */
|
||||
|
||||
/* Do not disassemble buffers to resource descriptors */
|
||||
|
||||
|
@ -184,8 +184,12 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
|
|||
* found in the RSDT/XSDT.
|
||||
*/
|
||||
ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
/* These addresses are calculated from the FADT Event Block addresses */
|
||||
|
||||
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_status;
|
||||
|
@ -397,10 +401,15 @@ ACPI_EXTERN struct acpi_fixed_event_handler
|
|||
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
|
||||
ACPI_EXTERN struct acpi_gpe_block_info
|
||||
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
|
||||
ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
|
||||
ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
|
||||
ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
/*****************************************************************************
|
||||
*
|
||||
* Debugger globals
|
||||
|
|
|
@ -80,6 +80,26 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value);
|
|||
|
||||
acpi_status acpi_hw_clear_acpi_status(void);
|
||||
|
||||
/*
|
||||
* hwsleep - sleep/wake support (Legacy sleep registers)
|
||||
*/
|
||||
acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags);
|
||||
|
||||
acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags);
|
||||
|
||||
acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags);
|
||||
|
||||
/*
|
||||
* hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
|
||||
*/
|
||||
void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
|
||||
|
||||
acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags);
|
||||
|
||||
acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags);
|
||||
|
||||
acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags);
|
||||
|
||||
/*
|
||||
* hwvalid - Port I/O with validation
|
||||
*/
|
||||
|
@ -128,16 +148,4 @@ acpi_status
|
|||
acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
|
||||
acpi_handle root_pci_device, acpi_handle pci_region);
|
||||
|
||||
#ifdef ACPI_FUTURE_USAGE
|
||||
/*
|
||||
* hwtimer - ACPI Timer prototypes
|
||||
*/
|
||||
acpi_status acpi_get_timer_resolution(u32 * resolution);
|
||||
|
||||
acpi_status acpi_get_timer(u32 * ticks);
|
||||
|
||||
acpi_status
|
||||
acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed);
|
||||
#endif /* ACPI_FUTURE_USAGE */
|
||||
|
||||
#endif /* __ACHWARE_H__ */
|
||||
|
|
|
@ -370,6 +370,7 @@ struct acpi_predefined_data {
|
|||
/* Defines for Flags field above */
|
||||
|
||||
#define ACPI_OBJECT_REPAIRED 1
|
||||
#define ACPI_OBJECT_WRAPPED 2
|
||||
|
||||
/*
|
||||
* Bitmapped return value types
|
||||
|
|
|
@ -516,6 +516,12 @@
|
|||
|
||||
#endif /* ACPI_DEBUG_OUTPUT */
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
#define ACPI_HW_OPTIONAL_FUNCTION(addr) addr
|
||||
#else
|
||||
#define ACPI_HW_OPTIONAL_FUNCTION(addr) NULL
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Some code only gets executed when the debugger is built in.
|
||||
* Note that this is entirely independent of whether the
|
||||
|
|
|
@ -283,8 +283,9 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
|
|||
union acpi_operand_object **return_object_ptr);
|
||||
|
||||
acpi_status
|
||||
acpi_ns_repair_package_list(struct acpi_predefined_data *data,
|
||||
union acpi_operand_object **obj_desc_ptr);
|
||||
acpi_ns_wrap_with_package(struct acpi_predefined_data *data,
|
||||
union acpi_operand_object *original_object,
|
||||
union acpi_operand_object **obj_desc_ptr);
|
||||
|
||||
acpi_status
|
||||
acpi_ns_repair_null_element(struct acpi_predefined_data *data,
|
||||
|
|
|
@ -67,6 +67,11 @@ acpi_status acpi_tb_resize_root_table_list(void);
|
|||
|
||||
acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc);
|
||||
|
||||
struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
|
||||
*table_header,
|
||||
struct acpi_table_desc
|
||||
*table_desc);
|
||||
|
||||
acpi_status
|
||||
acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index);
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@
|
|||
|
||||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evevent")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
||||
/* Local prototypes */
|
||||
static acpi_status acpi_ev_fixed_event_initialize(void);
|
||||
|
||||
|
@ -291,3 +291,5 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
|
|||
return ((acpi_gbl_fixed_event_handlers[event].
|
||||
handler) (acpi_gbl_fixed_event_handlers[event].context));
|
||||
}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
|
||||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evglock")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
||||
/* Local prototypes */
|
||||
static u32 acpi_ev_global_lock_handler(void *context);
|
||||
|
||||
|
@ -339,3 +339,5 @@ acpi_status acpi_ev_release_global_lock(void)
|
|||
acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
|
||||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evgpe")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
||||
/* Local prototypes */
|
||||
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
|
||||
|
||||
|
@ -766,3 +766,5 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
|
|||
|
||||
return_UINT32(ACPI_INTERRUPT_HANDLED);
|
||||
}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
|
||||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evgpeblk")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
||||
/* Local prototypes */
|
||||
static acpi_status
|
||||
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
|
||||
|
@ -504,3 +504,5 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
|||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
|
||||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evgpeinit")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
||||
/*
|
||||
* Note: History of _PRW support in ACPICA
|
||||
*
|
||||
|
@ -440,3 +440,5 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
|
|||
name, gpe_number));
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -48,6 +48,7 @@
|
|||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evgpeutil")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_walk_gpe_list
|
||||
|
@ -374,3 +375,5 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
|||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -108,27 +108,30 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
|
|||
ACPI_FUNCTION_NAME(ev_queue_notify_request);
|
||||
|
||||
/*
|
||||
* For value 3 (Ejection Request), some device method may need to be run.
|
||||
* For value 2 (Device Wake) if _PRW exists, the _PS0 method may need
|
||||
* to be run.
|
||||
* For value 0x03 (Ejection Request), may need to run a device method.
|
||||
* For value 0x02 (Device Wake), if _PRW exists, may need to run
|
||||
* the _PS0 method.
|
||||
* For value 0x80 (Status Change) on the power button or sleep button,
|
||||
* initiate soft-off or sleep operation?
|
||||
* initiate soft-off or sleep operation.
|
||||
*
|
||||
* For all cases, simply dispatch the notify to the handler.
|
||||
*/
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n",
|
||||
acpi_ut_get_node_name(node), node, notify_value,
|
||||
acpi_ut_get_notify_name(notify_value)));
|
||||
"Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
|
||||
acpi_ut_get_node_name(node),
|
||||
acpi_ut_get_type_name(node->type), notify_value,
|
||||
acpi_ut_get_notify_name(notify_value), node));
|
||||
|
||||
/* Get the notify object attached to the NS Node */
|
||||
|
||||
obj_desc = acpi_ns_get_attached_object(node);
|
||||
if (obj_desc) {
|
||||
|
||||
/* We have the notify object, Get the right handler */
|
||||
/* We have the notify object, Get the correct handler */
|
||||
|
||||
switch (node->type) {
|
||||
|
||||
/* Notify allowed only on these types */
|
||||
/* Notify is allowed only on these types */
|
||||
|
||||
case ACPI_TYPE_DEVICE:
|
||||
case ACPI_TYPE_THERMAL:
|
||||
|
@ -152,7 +155,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
|
|||
}
|
||||
|
||||
/*
|
||||
* If there is any handler to run, schedule the dispatcher.
|
||||
* If there is a handler to run, schedule the dispatcher.
|
||||
* Check for:
|
||||
* 1) Global system notify handler
|
||||
* 2) Global device notify handler
|
||||
|
@ -270,6 +273,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
|
|||
acpi_ut_delete_generic_state(notify_info);
|
||||
}
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
/******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_terminate
|
||||
|
@ -338,3 +342,5 @@ void acpi_ev_terminate(void)
|
|||
}
|
||||
return_VOID;
|
||||
}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
|
||||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evsci")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
||||
/* Local prototypes */
|
||||
static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
|
||||
|
||||
|
@ -181,3 +181,5 @@ acpi_status acpi_ev_remove_sci_handler(void)
|
|||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -51,222 +51,6 @@
|
|||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evxface")
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_install_exception_handler
|
||||
*
|
||||
* PARAMETERS: Handler - Pointer to the handler function for the
|
||||
* event
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Saves the pointer to the handler function
|
||||
*
|
||||
******************************************************************************/
|
||||
#ifdef ACPI_FUTURE_USAGE
|
||||
acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Don't allow two handlers. */
|
||||
|
||||
if (acpi_gbl_exception_handler) {
|
||||
status = AE_ALREADY_EXISTS;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Install the handler */
|
||||
|
||||
acpi_gbl_exception_handler = handler;
|
||||
|
||||
cleanup:
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
|
||||
#endif /* ACPI_FUTURE_USAGE */
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_install_global_event_handler
|
||||
*
|
||||
* PARAMETERS: Handler - Pointer to the global event handler function
|
||||
* Context - Value passed to the handler on each event
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Saves the pointer to the handler function. The global handler
|
||||
* is invoked upon each incoming GPE and Fixed Event. It is
|
||||
* invoked at interrupt level at the time of the event dispatch.
|
||||
* Can be used to update event counters, etc.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
|
||||
|
||||
/* Parameter validation */
|
||||
|
||||
if (!handler) {
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Don't allow two handlers. */
|
||||
|
||||
if (acpi_gbl_global_event_handler) {
|
||||
status = AE_ALREADY_EXISTS;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
acpi_gbl_global_event_handler = handler;
|
||||
acpi_gbl_global_event_handler_context = context;
|
||||
|
||||
cleanup:
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_install_fixed_event_handler
|
||||
*
|
||||
* PARAMETERS: Event - Event type to enable.
|
||||
* Handler - Pointer to the handler function for the
|
||||
* event
|
||||
* Context - Value passed to the handler on each GPE
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Saves the pointer to the handler function and then enables the
|
||||
* event.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_install_fixed_event_handler(u32 event,
|
||||
acpi_event_handler handler, void *context)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
|
||||
|
||||
/* Parameter validation */
|
||||
|
||||
if (event > ACPI_EVENT_MAX) {
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Don't allow two handlers. */
|
||||
|
||||
if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
|
||||
status = AE_ALREADY_EXISTS;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Install the handler before enabling the event */
|
||||
|
||||
acpi_gbl_fixed_event_handlers[event].handler = handler;
|
||||
acpi_gbl_fixed_event_handlers[event].context = context;
|
||||
|
||||
status = acpi_clear_event(event);
|
||||
if (ACPI_SUCCESS(status))
|
||||
status = acpi_enable_event(event, 0);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
|
||||
event));
|
||||
|
||||
/* Remove the handler */
|
||||
|
||||
acpi_gbl_fixed_event_handlers[event].handler = NULL;
|
||||
acpi_gbl_fixed_event_handlers[event].context = NULL;
|
||||
} else {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Enabled fixed event %X, Handler=%p\n", event,
|
||||
handler));
|
||||
}
|
||||
|
||||
cleanup:
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_remove_fixed_event_handler
|
||||
*
|
||||
* PARAMETERS: Event - Event type to disable.
|
||||
* Handler - Address of the handler
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Disables the event and unregisters the event handler.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
|
||||
|
||||
/* Parameter validation */
|
||||
|
||||
if (event > ACPI_EVENT_MAX) {
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Disable the event before removing the handler */
|
||||
|
||||
status = acpi_disable_event(event, 0);
|
||||
|
||||
/* Always Remove the handler */
|
||||
|
||||
acpi_gbl_fixed_event_handlers[event].handler = NULL;
|
||||
acpi_gbl_fixed_event_handlers[event].context = NULL;
|
||||
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_WARNING((AE_INFO,
|
||||
"Could not write to fixed event enable register 0x%X",
|
||||
event));
|
||||
} else {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
|
||||
event));
|
||||
}
|
||||
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
|
@ -334,6 +118,7 @@ acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj,
|
|||
return AE_OK;
|
||||
}
|
||||
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_install_notify_handler
|
||||
|
@ -703,6 +488,224 @@ acpi_remove_notify_handler(acpi_handle device,
|
|||
|
||||
ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_install_exception_handler
|
||||
*
|
||||
* PARAMETERS: Handler - Pointer to the handler function for the
|
||||
* event
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Saves the pointer to the handler function
|
||||
*
|
||||
******************************************************************************/
|
||||
#ifdef ACPI_FUTURE_USAGE
|
||||
acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Don't allow two handlers. */
|
||||
|
||||
if (acpi_gbl_exception_handler) {
|
||||
status = AE_ALREADY_EXISTS;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Install the handler */
|
||||
|
||||
acpi_gbl_exception_handler = handler;
|
||||
|
||||
cleanup:
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
|
||||
#endif /* ACPI_FUTURE_USAGE */
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_install_global_event_handler
|
||||
*
|
||||
* PARAMETERS: Handler - Pointer to the global event handler function
|
||||
* Context - Value passed to the handler on each event
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Saves the pointer to the handler function. The global handler
|
||||
* is invoked upon each incoming GPE and Fixed Event. It is
|
||||
* invoked at interrupt level at the time of the event dispatch.
|
||||
* Can be used to update event counters, etc.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
|
||||
|
||||
/* Parameter validation */
|
||||
|
||||
if (!handler) {
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Don't allow two handlers. */
|
||||
|
||||
if (acpi_gbl_global_event_handler) {
|
||||
status = AE_ALREADY_EXISTS;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
acpi_gbl_global_event_handler = handler;
|
||||
acpi_gbl_global_event_handler_context = context;
|
||||
|
||||
cleanup:
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_install_fixed_event_handler
|
||||
*
|
||||
* PARAMETERS: Event - Event type to enable.
|
||||
* Handler - Pointer to the handler function for the
|
||||
* event
|
||||
* Context - Value passed to the handler on each GPE
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Saves the pointer to the handler function and then enables the
|
||||
* event.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_install_fixed_event_handler(u32 event,
|
||||
acpi_event_handler handler, void *context)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
|
||||
|
||||
        /* Parameter validation */

        if (event > ACPI_EVENT_MAX) {
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }

        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Don't allow two handlers. */

        if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
                status = AE_ALREADY_EXISTS;
                goto cleanup;
        }

        /* Install the handler before enabling the event */

        acpi_gbl_fixed_event_handlers[event].handler = handler;
        acpi_gbl_fixed_event_handlers[event].context = context;

        status = acpi_clear_event(event);
        if (ACPI_SUCCESS(status))
                status = acpi_enable_event(event, 0);
        if (ACPI_FAILURE(status)) {
                ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
                              event));

                /* Remove the handler */

                acpi_gbl_fixed_event_handlers[event].handler = NULL;
                acpi_gbl_fixed_event_handlers[event].context = NULL;
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Enabled fixed event %X, Handler=%p\n", event,
                                  handler));
        }

cleanup:
        (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
        return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)

/*******************************************************************************
 *
 * FUNCTION:    acpi_remove_fixed_event_handler
 *
 * PARAMETERS:  Event           - Event type to disable.
 *              Handler         - Address of the handler
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Disables the event and unregisters the event handler.
 *
 ******************************************************************************/
acpi_status
acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
{
        acpi_status status = AE_OK;

        ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);

        /* Parameter validation */

        if (event > ACPI_EVENT_MAX) {
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }

        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Disable the event before removing the handler */

        status = acpi_disable_event(event, 0);

        /* Always Remove the handler */

        acpi_gbl_fixed_event_handlers[event].handler = NULL;
        acpi_gbl_fixed_event_handlers[event].context = NULL;

        if (ACPI_FAILURE(status)) {
                ACPI_WARNING((AE_INFO,
                              "Could not write to fixed event enable register 0x%X",
                              event));
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
                                  event));
        }

        (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
        return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)

/*******************************************************************************
 *
 * FUNCTION:    acpi_install_gpe_handler

@@ -984,3 +987,4 @@ acpi_status acpi_release_global_lock(u32 handle)
}

ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
#endif /* !ACPI_REDUCED_HARDWARE */

@@ -49,6 +49,7 @@
#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evxfevnt")

#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/*******************************************************************************
 *
 * FUNCTION:    acpi_enable

@@ -352,3 +353,4 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
}

ACPI_EXPORT_SYMBOL(acpi_get_event_status)
#endif /* !ACPI_REDUCED_HARDWARE */

@@ -50,6 +50,7 @@
#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evxfgpe")

#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/******************************************************************************
 *
 * FUNCTION:    acpi_update_all_gpes

@@ -695,3 +696,4 @@ acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
}

ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
#endif /* !ACPI_REDUCED_HARDWARE */

@@ -48,6 +48,7 @@
#define _COMPONENT          ACPI_HARDWARE
ACPI_MODULE_NAME("hwacpi")

#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/******************************************************************************
 *
 * FUNCTION:    acpi_hw_set_mode

@@ -166,3 +167,5 @@ u32 acpi_hw_get_mode(void)
                return_UINT32(ACPI_SYS_MODE_LEGACY);
        }
}

#endif /* !ACPI_REDUCED_HARDWARE */
@@ -0,0 +1,247 @@
/******************************************************************************
 *
 * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
 *                    extended FADT-V5 sleep registers.
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_HARDWARE
ACPI_MODULE_NAME("hwesleep")

/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_execute_sleep_method
 *
 * PARAMETERS:  method_pathname     - Pathname of method to execute
 *              integer_argument    - Argument to pass to the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Execute a sleep/wake related method with one integer argument
 *              and no return value.
 *
 ******************************************************************************/
void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
{
        struct acpi_object_list arg_list;
        union acpi_object arg;
        acpi_status status;

        ACPI_FUNCTION_TRACE(hw_execute_sleep_method);

        /* One argument, integer_argument; No return value expected */

        arg_list.count = 1;
        arg_list.pointer = &arg;
        arg.type = ACPI_TYPE_INTEGER;
        arg.integer.value = (u64)integer_argument;

        status = acpi_evaluate_object(NULL, method_pathname, &arg_list, NULL);
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                ACPI_EXCEPTION((AE_INFO, status, "While executing method %s",
                                method_pathname));
        }

        return_VOID;
}
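
For illustration only, not part of the commit: a caller hands this helper a method pathname constant and a single integer argument. A minimal usage sketch, assuming the ACPICA-internal headers and constants already used in this file, might look like this.

/* Illustrative sketch only; not part of this patch */
static void example_signal_sleep_transition(u8 sleep_state)
{
        /* _SST takes one integer: the system status indicator to display */
        acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_SLEEPING);

        /* _GTS takes the target sleep state (for example 3 for S3) */
        acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
}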

/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_extended_sleep
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter
 *              Flags               - ACPI_EXECUTE_GTS to run optional method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enter a system sleep state via the extended FADT sleep
 *              registers (V5 FADT).
 *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
 *
 ******************************************************************************/

acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
{
        acpi_status status;
        u8 sleep_type_value;
        u64 sleep_status;

        ACPI_FUNCTION_TRACE(hw_extended_sleep);

        /* Extended sleep registers must be valid */

        if (!acpi_gbl_FADT.sleep_control.address ||
            !acpi_gbl_FADT.sleep_status.address) {
                return_ACPI_STATUS(AE_NOT_EXIST);
        }

        /* Clear wake status (WAK_STS) */

        status = acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        acpi_gbl_system_awake_and_running = FALSE;

        /* Optionally execute _GTS (Going To Sleep) */

        if (flags & ACPI_EXECUTE_GTS) {
                acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
        }

        /* Flush caches, as per ACPI specification */

        ACPI_FLUSH_CPU_CACHE();

        /*
         * Set the SLP_TYP and SLP_EN bits.
         *
         * Note: We only use the first value returned by the \_Sx method
         * (acpi_gbl_sleep_type_a) - As per ACPI specification.
         */
        ACPI_DEBUG_PRINT((ACPI_DB_INIT,
                          "Entering sleep state [S%u]\n", sleep_state));

        sleep_type_value =
            ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
             ACPI_X_SLEEP_TYPE_MASK);

        status = acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
                            &acpi_gbl_FADT.sleep_control);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Wait for transition back to Working State */

        do {
                status = acpi_read(&sleep_status, &acpi_gbl_FADT.sleep_status);
                if (ACPI_FAILURE(status)) {
                        return_ACPI_STATUS(status);
                }

        } while (!(((u8)sleep_status) & ACPI_X_WAKE_STATUS));

        return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_extended_wake_prep
 *
 * PARAMETERS:  sleep_state         - Which sleep state we just exited
 *              Flags               - ACPI_EXECUTE_BFS to run optional method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform first part of OS-independent ACPI cleanup after
 *              a sleep. Called with interrupts ENABLED.
 *
 ******************************************************************************/

acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
{
        acpi_status status;
        u8 sleep_type_value;

        ACPI_FUNCTION_TRACE(hw_extended_wake_prep);

        status = acpi_get_sleep_type_data(ACPI_STATE_S0,
                                          &acpi_gbl_sleep_type_a,
                                          &acpi_gbl_sleep_type_b);
        if (ACPI_SUCCESS(status)) {
                sleep_type_value =
                    ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
                     ACPI_X_SLEEP_TYPE_MASK);

                (void)acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
                                 &acpi_gbl_FADT.sleep_control);
        }

        /* Optionally execute _BFS (Back From Sleep) */

        if (flags & ACPI_EXECUTE_BFS) {
                acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
        }
        return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_extended_wake
 *
 * PARAMETERS:  sleep_state         - Which sleep state we just exited
 *              Flags               - Reserved, set to zero
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
 *              Called with interrupts ENABLED.
 *
 ******************************************************************************/

acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
{
        ACPI_FUNCTION_TRACE(hw_extended_wake);

        /* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */

        acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;

        /* Execute the wake methods */

        acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WAKING);
        acpi_hw_execute_sleep_method(METHOD_PATHNAME__WAK, sleep_state);

        /*
         * Some BIOS code assumes that WAK_STS will be cleared on resume
         * and use it to determine whether the system is rebooting or
         * resuming. Clear WAK_STS for compatibility.
         */
        (void)acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
        acpi_gbl_system_awake_and_running = TRUE;

        acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
        return_ACPI_STATUS(AE_OK);
}
|
|
@ -48,7 +48,7 @@
|
|||
|
||||
#define _COMPONENT ACPI_HARDWARE
|
||||
ACPI_MODULE_NAME("hwgpe")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
||||
/* Local prototypes */
|
||||
static acpi_status
|
||||
acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
||||
|
@ -479,3 +479,5 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
|
|||
status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -51,6 +51,7 @@
|
|||
#define _COMPONENT ACPI_HARDWARE
|
||||
ACPI_MODULE_NAME("hwregs")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
/* Local Prototypes */
|
||||
static acpi_status
|
||||
acpi_hw_read_multiple(u32 *value,
|
||||
|
@ -62,6 +63,8 @@ acpi_hw_write_multiple(u32 value,
|
|||
struct acpi_generic_address *register_a,
|
||||
struct acpi_generic_address *register_b);
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_hw_validate_register
|
||||
|
@ -154,6 +157,7 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
|
|||
acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
|
||||
{
|
||||
u64 address;
|
||||
u64 value64;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_NAME(hw_read);
|
||||
|
@ -175,7 +179,9 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
|
|||
*/
|
||||
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
|
||||
status = acpi_os_read_memory((acpi_physical_address)
|
||||
address, value, reg->bit_width);
|
||||
address, &value64, reg->bit_width);
|
||||
|
||||
*value = (u32)value64;
|
||||
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
|
||||
|
||||
status = acpi_hw_read_port((acpi_io_address)
|
||||
|
@ -225,7 +231,8 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
|
|||
*/
|
||||
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
|
||||
status = acpi_os_write_memory((acpi_physical_address)
|
||||
address, value, reg->bit_width);
|
||||
address, (u64)value,
|
||||
reg->bit_width);
|
||||
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
|
||||
|
||||
status = acpi_hw_write_port((acpi_io_address)
|
||||
|
@ -240,6 +247,7 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
|
|||
return (status);
|
||||
}
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_hw_clear_acpi_status
|
||||
|
@ -285,7 +293,7 @@ exit:
|
|||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_hw_get_register_bit_mask
|
||||
* FUNCTION: acpi_hw_get_bit_register_info
|
||||
*
|
||||
* PARAMETERS: register_id - Index of ACPI Register to access
|
||||
*
|
||||
|
@ -658,3 +666,5 @@ acpi_hw_write_multiple(u32 value,
|
|||
|
||||
return (status);
|
||||
}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
|
||||
/******************************************************************************
|
||||
*
|
||||
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Interface
|
||||
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
|
||||
* original/legacy sleep/PM registers.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
|
@ -43,213 +43,37 @@
|
|||
*/
|
||||
|
||||
#include <acpi/acpi.h>
|
||||
#include <linux/acpi.h>
|
||||
#include "accommon.h"
|
||||
#include "actables.h"
|
||||
#include <linux/tboot.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#define _COMPONENT ACPI_HARDWARE
|
||||
ACPI_MODULE_NAME("hwsleep")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_set_firmware_waking_vector
|
||||
*
|
||||
* PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
|
||||
* entry point.
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_set_firmware_waking_vector(u32 physical_address)
|
||||
{
|
||||
ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
|
||||
|
||||
|
||||
/*
|
||||
* According to the ACPI specification 2.0c and later, the 64-bit
|
||||
* waking vector should be cleared and the 32-bit waking vector should
|
||||
* be used, unless we want the wake-up code to be called by the BIOS in
|
||||
* Protected Mode. Some systems (for example HP dv5-1004nr) are known
|
||||
* to fail to resume if the 64-bit vector is used.
|
||||
*/
|
||||
|
||||
/* Set the 32-bit vector */
|
||||
|
||||
acpi_gbl_FACS->firmware_waking_vector = physical_address;
|
||||
|
||||
/* Clear the 64-bit vector if it exists */
|
||||
|
||||
if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
|
||||
acpi_gbl_FACS->xfirmware_waking_vector = 0;
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
|
||||
|
||||
#if ACPI_MACHINE_WIDTH == 64
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_set_firmware_waking_vector64
|
||||
*
|
||||
* PARAMETERS: physical_address - 64-bit physical address of ACPI protected
|
||||
* mode entry point.
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
|
||||
* it exists in the table. This function is intended for use with
|
||||
* 64-bit host operating systems.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_set_firmware_waking_vector64(u64 physical_address)
|
||||
{
|
||||
ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
|
||||
|
||||
|
||||
/* Determine if the 64-bit vector actually exists */
|
||||
|
||||
if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
|
||||
return_ACPI_STATUS(AE_NOT_EXIST);
|
||||
}
|
||||
|
||||
/* Clear 32-bit vector, set the 64-bit X_ vector */
|
||||
|
||||
acpi_gbl_FACS->firmware_waking_vector = 0;
|
||||
acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
|
||||
#endif
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_enter_sleep_state_prep
|
||||
* FUNCTION: acpi_hw_legacy_sleep
|
||||
*
|
||||
* PARAMETERS: sleep_state - Which sleep state to enter
|
||||
* Flags - ACPI_EXECUTE_GTS to run optional method
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Prepare to enter a system sleep state (see ACPI 2.0 spec p 231)
|
||||
* This function must execute with interrupts enabled.
|
||||
* We break sleeping into 2 stages so that OSPM can handle
|
||||
* various OS-specific tasks between the two steps.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
|
||||
{
|
||||
acpi_status status;
|
||||
struct acpi_object_list arg_list;
|
||||
union acpi_object arg;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
|
||||
|
||||
/* _PSW methods could be run here to enable wake-on keyboard, LAN, etc. */
|
||||
|
||||
status = acpi_get_sleep_type_data(sleep_state,
|
||||
&acpi_gbl_sleep_type_a,
|
||||
&acpi_gbl_sleep_type_b);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Setup parameter object */
|
||||
|
||||
arg_list.count = 1;
|
||||
arg_list.pointer = &arg;
|
||||
|
||||
arg.type = ACPI_TYPE_INTEGER;
|
||||
arg.integer.value = sleep_state;
|
||||
|
||||
/* Run the _PTS method */
|
||||
|
||||
status = acpi_evaluate_object(NULL, METHOD_NAME__PTS, &arg_list, NULL);
|
||||
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Setup the argument to _SST */
|
||||
|
||||
switch (sleep_state) {
|
||||
case ACPI_STATE_S0:
|
||||
arg.integer.value = ACPI_SST_WORKING;
|
||||
break;
|
||||
|
||||
case ACPI_STATE_S1:
|
||||
case ACPI_STATE_S2:
|
||||
case ACPI_STATE_S3:
|
||||
arg.integer.value = ACPI_SST_SLEEPING;
|
||||
break;
|
||||
|
||||
case ACPI_STATE_S4:
|
||||
arg.integer.value = ACPI_SST_SLEEP_CONTEXT;
|
||||
break;
|
||||
|
||||
default:
|
||||
arg.integer.value = ACPI_SST_INDICATOR_OFF; /* Default is off */
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the system indicators to show the desired sleep state.
|
||||
* _SST is an optional method (return no error if not found)
|
||||
*/
|
||||
status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
|
||||
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"While executing method _SST"));
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
|
||||
|
||||
static unsigned int gts, bfs;
|
||||
module_param(gts, uint, 0644);
|
||||
module_param(bfs, uint, 0644);
|
||||
MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
|
||||
MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_enter_sleep_state
|
||||
*
|
||||
* PARAMETERS: sleep_state - Which sleep state to enter
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
|
||||
* DESCRIPTION: Enter a system sleep state via the legacy FADT PM registers
|
||||
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
||||
acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
|
||||
{
|
||||
u32 pm1a_control;
|
||||
u32 pm1b_control;
|
||||
struct acpi_bit_register_info *sleep_type_reg_info;
|
||||
struct acpi_bit_register_info *sleep_enable_reg_info;
|
||||
u32 pm1a_control;
|
||||
u32 pm1b_control;
|
||||
u32 in_value;
|
||||
struct acpi_object_list arg_list;
|
||||
union acpi_object arg;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
|
||||
|
||||
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
|
||||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
|
||||
ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
|
||||
acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
|
||||
}
|
||||
ACPI_FUNCTION_TRACE(hw_legacy_sleep);
|
||||
|
||||
sleep_type_reg_info =
|
||||
acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE);
|
||||
|
@ -271,6 +95,18 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
if (sleep_state != ACPI_STATE_S5) {
|
||||
/*
|
||||
* Disable BM arbitration. This feature is contained within an
|
||||
* optional register (PM2 Control), so ignore a BAD_ADDRESS
|
||||
* exception.
|
||||
*/
|
||||
status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
|
||||
if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* 1) Disable/Clear all GPEs
|
||||
* 2) Enable all wakeup GPEs
|
||||
|
@ -286,18 +122,10 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
if (gts) {
|
||||
/* Execute the _GTS method */
|
||||
/* Optionally execute _GTS (Going To Sleep) */
|
||||
|
||||
arg_list.count = 1;
|
||||
arg_list.pointer = &arg;
|
||||
arg.type = ACPI_TYPE_INTEGER;
|
||||
arg.integer.value = sleep_state;
|
||||
|
||||
status = acpi_evaluate_object(NULL, METHOD_NAME__GTS, &arg_list, NULL);
|
||||
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
if (flags & ACPI_EXECUTE_GTS) {
|
||||
acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
|
||||
}
|
||||
|
||||
/* Get current value of PM1A control */
|
||||
|
@ -344,8 +172,12 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
|||
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
||||
tboot_sleep(sleep_state, pm1a_control, pm1b_control);
|
||||
|
||||
status = acpi_os_prepare_sleep(sleep_state, pm1a_control,
|
||||
pm1b_control);
|
||||
if (ACPI_SKIP(status))
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
if (ACPI_FAILURE(status))
|
||||
return_ACPI_STATUS(status);
|
||||
/* Write #2: Write both SLP_TYP + SLP_EN */
|
||||
|
||||
status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
|
||||
|
@ -375,114 +207,44 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
|||
}
|
||||
}
|
||||
|
||||
/* Wait until we enter sleep state */
|
||||
/* Wait for transition back to Working State */
|
||||
|
||||
do {
|
||||
status = acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS,
|
||||
&in_value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Spin until we wake */
|
||||
|
||||
} while (!in_value);
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_enter_sleep_state_s4bios
|
||||
*
|
||||
* PARAMETERS: None
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Perform a S4 bios request.
|
||||
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
|
||||
{
|
||||
u32 in_value;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
|
||||
|
||||
/* Clear the wake status bit (PM1) */
|
||||
|
||||
status =
|
||||
acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
status = acpi_hw_clear_acpi_status();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*
|
||||
* 1) Disable/Clear all GPEs
|
||||
* 2) Enable all wakeup GPEs
|
||||
*/
|
||||
status = acpi_hw_disable_all_gpes();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
acpi_gbl_system_awake_and_running = FALSE;
|
||||
|
||||
status = acpi_hw_enable_all_wakeup_gpes();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
||||
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
|
||||
(u32) acpi_gbl_FADT.S4bios_request, 8);
|
||||
|
||||
do {
|
||||
acpi_os_stall(1000);
|
||||
status =
|
||||
acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
} while (!in_value);
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_leave_sleep_state_prep
|
||||
* FUNCTION: acpi_hw_legacy_wake_prep
|
||||
*
|
||||
* PARAMETERS: sleep_state - Which sleep state we are exiting
|
||||
* PARAMETERS: sleep_state - Which sleep state we just exited
|
||||
* Flags - ACPI_EXECUTE_BFS to run optional method
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
|
||||
* sleep.
|
||||
* Called with interrupts DISABLED.
|
||||
* Called with interrupts ENABLED.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
|
||||
|
||||
acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
|
||||
{
|
||||
struct acpi_object_list arg_list;
|
||||
union acpi_object arg;
|
||||
acpi_status status;
|
||||
struct acpi_bit_register_info *sleep_type_reg_info;
|
||||
struct acpi_bit_register_info *sleep_enable_reg_info;
|
||||
u32 pm1a_control;
|
||||
u32 pm1b_control;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
|
||||
ACPI_FUNCTION_TRACE(hw_legacy_wake_prep);
|
||||
|
||||
/*
|
||||
* Set SLP_TYPE and SLP_EN to state S0.
|
||||
|
@ -525,27 +287,20 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
|
|||
}
|
||||
}
|
||||
|
||||
if (bfs) {
|
||||
/* Execute the _BFS method */
|
||||
/* Optionally execute _BFS (Back From Sleep) */
|
||||
|
||||
arg_list.count = 1;
|
||||
arg_list.pointer = &arg;
|
||||
arg.type = ACPI_TYPE_INTEGER;
|
||||
arg.integer.value = sleep_state;
|
||||
|
||||
status = acpi_evaluate_object(NULL, METHOD_NAME__BFS, &arg_list, NULL);
|
||||
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
|
||||
ACPI_EXCEPTION((AE_INFO, status, "During Method _BFS"));
|
||||
}
|
||||
if (flags & ACPI_EXECUTE_BFS) {
|
||||
acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
|
||||
}
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_leave_sleep_state
|
||||
* FUNCTION: acpi_hw_legacy_wake
|
||||
*
|
||||
* PARAMETERS: sleep_state - Which sleep state we just exited
|
||||
* Flags - Reserved, set to zero
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
|
@ -553,31 +308,17 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
|
|||
* Called with interrupts ENABLED.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_leave_sleep_state(u8 sleep_state)
|
||||
|
||||
acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
|
||||
{
|
||||
struct acpi_object_list arg_list;
|
||||
union acpi_object arg;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
|
||||
ACPI_FUNCTION_TRACE(hw_legacy_wake);
|
||||
|
||||
/* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */
|
||||
|
||||
acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;
|
||||
|
||||
/* Setup parameter object */
|
||||
|
||||
arg_list.count = 1;
|
||||
arg_list.pointer = &arg;
|
||||
arg.type = ACPI_TYPE_INTEGER;
|
||||
|
||||
/* Ignore any errors from these methods */
|
||||
|
||||
arg.integer.value = ACPI_SST_WAKING;
|
||||
status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
|
||||
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
|
||||
ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
|
||||
}
|
||||
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WAKING);
|
||||
|
||||
/*
|
||||
* GPEs must be enabled before _WAK is called as GPEs
|
||||
|
@ -591,46 +332,50 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
|
|||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
status = acpi_hw_enable_all_runtime_gpes();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
arg.integer.value = sleep_state;
|
||||
status = acpi_evaluate_object(NULL, METHOD_NAME__WAK, &arg_list, NULL);
|
||||
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
|
||||
ACPI_EXCEPTION((AE_INFO, status, "During Method _WAK"));
|
||||
}
|
||||
/* TBD: _WAK "sometimes" returns stuff - do we want to look at it? */
|
||||
/*
|
||||
* Now we can execute _WAK, etc. Some machines require that the GPEs
|
||||
* are enabled before the wake methods are executed.
|
||||
*/
|
||||
acpi_hw_execute_sleep_method(METHOD_PATHNAME__WAK, sleep_state);
|
||||
|
||||
/*
|
||||
* Some BIOSes assume that WAK_STS will be cleared on resume and use
|
||||
* it to determine whether the system is rebooting or resuming. Clear
|
||||
* it for compatibility.
|
||||
* Some BIOS code assumes that WAK_STS will be cleared on resume
|
||||
* and use it to determine whether the system is rebooting or
|
||||
* resuming. Clear WAK_STS for compatibility.
|
||||
*/
|
||||
acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, 1);
|
||||
|
||||
acpi_gbl_system_awake_and_running = TRUE;
|
||||
|
||||
/* Enable power button */
|
||||
|
||||
(void)
|
||||
acpi_write_bit_register(acpi_gbl_fixed_event_info
|
||||
[ACPI_EVENT_POWER_BUTTON].
|
||||
enable_register_id, ACPI_ENABLE_EVENT);
|
||||
[ACPI_EVENT_POWER_BUTTON].
|
||||
enable_register_id, ACPI_ENABLE_EVENT);
|
||||
|
||||
(void)
|
||||
acpi_write_bit_register(acpi_gbl_fixed_event_info
|
||||
[ACPI_EVENT_POWER_BUTTON].
|
||||
status_register_id, ACPI_CLEAR_STATUS);
|
||||
[ACPI_EVENT_POWER_BUTTON].
|
||||
status_register_id, ACPI_CLEAR_STATUS);
|
||||
|
||||
arg.integer.value = ACPI_SST_WORKING;
|
||||
status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
|
||||
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
|
||||
ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
|
||||
/*
|
||||
* Enable BM arbitration. This feature is contained within an
|
||||
* optional register (PM2 Control), so ignore a BAD_ADDRESS
|
||||
* exception.
|
||||
*/
|
||||
status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
|
||||
if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -49,6 +49,7 @@
|
|||
#define _COMPONENT ACPI_HARDWARE
|
||||
ACPI_MODULE_NAME("hwtimer")
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
||||
/******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_get_timer_resolution
|
||||
|
@ -187,3 +188,4 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
|
|||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_get_timer_duration)
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
|
|
@ -74,8 +74,7 @@ acpi_status acpi_reset(void)
|
|||
|
||||
/* Check if the reset register is supported */
|
||||
|
||||
if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
|
||||
!reset_reg->address) {
|
||||
if (!reset_reg->address) {
|
||||
return_ACPI_STATUS(AE_NOT_EXIST);
|
||||
}
|
||||
|
||||
|
@ -138,11 +137,6 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
|
|||
return (status);
|
||||
}
|
||||
|
||||
width = reg->bit_width;
|
||||
if (width == 64) {
|
||||
width = 32; /* Break into two 32-bit transfers */
|
||||
}
|
||||
|
||||
/* Initialize entire 64-bit return value to zero */
|
||||
|
||||
*return_value = 0;
|
||||
|
@ -154,25 +148,18 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
|
|||
*/
|
||||
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
|
||||
status = acpi_os_read_memory((acpi_physical_address)
|
||||
address, &value, width);
|
||||
address, return_value,
|
||||
reg->bit_width);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return (status);
|
||||
}
|
||||
*return_value = value;
|
||||
|
||||
if (reg->bit_width == 64) {
|
||||
|
||||
/* Read the top 32 bits */
|
||||
|
||||
status = acpi_os_read_memory((acpi_physical_address)
|
||||
(address + 4), &value, 32);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return (status);
|
||||
}
|
||||
*return_value |= ((u64)value << 32);
|
||||
}
|
||||
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
|
||||
|
||||
width = reg->bit_width;
|
||||
if (width == 64) {
|
||||
width = 32; /* Break into two 32-bit transfers */
|
||||
}
|
||||
|
||||
status = acpi_hw_read_port((acpi_io_address)
|
||||
address, &value, width);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
@ -231,33 +218,23 @@ acpi_status acpi_write(u64 value, struct acpi_generic_address *reg)
|
|||
return (status);
|
||||
}
|
||||
|
||||
width = reg->bit_width;
|
||||
if (width == 64) {
|
||||
width = 32; /* Break into two 32-bit transfers */
|
||||
}
|
||||
|
||||
/*
|
||||
* Two address spaces supported: Memory or IO. PCI_Config is
|
||||
* not supported here because the GAS structure is insufficient
|
||||
*/
|
||||
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
|
||||
status = acpi_os_write_memory((acpi_physical_address)
|
||||
address, ACPI_LODWORD(value),
|
||||
width);
|
||||
address, value, reg->bit_width);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return (status);
|
||||
}
|
||||
|
||||
if (reg->bit_width == 64) {
|
||||
status = acpi_os_write_memory((acpi_physical_address)
|
||||
(address + 4),
|
||||
ACPI_HIDWORD(value), 32);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return (status);
|
||||
}
|
||||
}
|
||||
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
|
||||
|
||||
width = reg->bit_width;
|
||||
if (width == 64) {
|
||||
width = 32; /* Break into two 32-bit transfers */
|
||||
}
|
||||
|
||||
status = acpi_hw_write_port((acpi_io_address)
|
||||
address, ACPI_LODWORD(value),
|
||||
width);
|
||||
|
@ -286,6 +263,7 @@ acpi_status acpi_write(u64 value, struct acpi_generic_address *reg)
|
|||
|
||||
ACPI_EXPORT_SYMBOL(acpi_write)
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_read_bit_register
|
||||
|
@ -453,7 +431,7 @@ unlock_and_exit:
|
|||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_get_sleep_type_data
|
||||
|
|
|
@ -0,0 +1,431 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2012, Intel Corp.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions, and the following disclaimer,
|
||||
* without modification.
|
||||
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
|
||||
* substantially similar to the "NO WARRANTY" disclaimer below
|
||||
* ("Disclaimer") and any redistribution must be conditioned upon
|
||||
* including a substantially similar Disclaimer requirement for further
|
||||
* binary redistribution.
|
||||
* 3. Neither the names of the above-listed copyright holders nor the names
|
||||
* of any contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* Alternatively, this software may be distributed under the terms of the
|
||||
* GNU General Public License ("GPL") version 2 as published by the Free
|
||||
* Software Foundation.
|
||||
*
|
||||
* NO WARRANTY
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
||||
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
|
||||
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGES.
|
||||
*/
|
||||
|
||||
#include <acpi/acpi.h>
|
||||
#include "accommon.h"
|
||||
#include <linux/module.h>
|
||||
|
||||
#define _COMPONENT ACPI_HARDWARE
|
||||
ACPI_MODULE_NAME("hwxfsleep")
|
||||
|
||||
/* Local prototypes */
static acpi_status
acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id);

/*
 * Dispatch table used to efficiently branch to the various sleep
 * functions.
 */
#define ACPI_SLEEP_FUNCTION_ID          0
#define ACPI_WAKE_PREP_FUNCTION_ID      1
#define ACPI_WAKE_FUNCTION_ID           2

/* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */

static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
        {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
         acpi_hw_extended_sleep},
        {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
         acpi_hw_extended_wake_prep},
        {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
};
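
For illustration only, not part of the commit: the three function-ID constants above index this table, and each entry pairs an optional legacy handler (compiled to NULL by ACPI_HW_OPTIONAL_FUNCTION when ACPI_REDUCED_HARDWARE is set) with the extended FADT-V5 handler. A minimal lookup sketch, assuming the types declared above:

/* Illustrative sketch only; not part of this patch */
static acpi_status example_run_extended_sleep(u8 sleep_state, u8 flags)
{
        struct acpi_sleep_functions *functions =
            &acpi_sleep_dispatch[ACPI_SLEEP_FUNCTION_ID];

        /* The extended entry is always populated; the legacy entry may be NULL */
        return (functions->extended_function(sleep_state, flags));
}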
|
||||
|
||||
/*
|
||||
* These functions are removed for the ACPI_REDUCED_HARDWARE case:
|
||||
* acpi_set_firmware_waking_vector
|
||||
* acpi_set_firmware_waking_vector64
|
||||
* acpi_enter_sleep_state_s4bios
|
||||
*/
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_set_firmware_waking_vector
|
||||
*
|
||||
* PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
|
||||
* entry point.
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
|
||||
{
|
||||
ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
|
||||
|
||||
|
||||
/*
|
||||
* According to the ACPI specification 2.0c and later, the 64-bit
|
||||
* waking vector should be cleared and the 32-bit waking vector should
|
||||
* be used, unless we want the wake-up code to be called by the BIOS in
|
||||
* Protected Mode. Some systems (for example HP dv5-1004nr) are known
|
||||
* to fail to resume if the 64-bit vector is used.
|
||||
*/
|
||||
|
||||
/* Set the 32-bit vector */
|
||||
|
||||
acpi_gbl_FACS->firmware_waking_vector = physical_address;
|
||||
|
||||
/* Clear the 64-bit vector if it exists */
|
||||
|
||||
if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
|
||||
acpi_gbl_FACS->xfirmware_waking_vector = 0;
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
|
||||
|
||||
#if ACPI_MACHINE_WIDTH == 64
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_set_firmware_waking_vector64
|
||||
*
|
||||
* PARAMETERS: physical_address - 64-bit physical address of ACPI protected
|
||||
* mode entry point.
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
|
||||
* it exists in the table. This function is intended for use with
|
||||
* 64-bit host operating systems.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_set_firmware_waking_vector64(u64 physical_address)
|
||||
{
|
||||
ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
|
||||
|
||||
|
||||
/* Determine if the 64-bit vector actually exists */
|
||||
|
||||
if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
|
||||
return_ACPI_STATUS(AE_NOT_EXIST);
|
||||
}
|
||||
|
||||
/* Clear 32-bit vector, set the 64-bit X_ vector */
|
||||
|
||||
acpi_gbl_FACS->firmware_waking_vector = 0;
|
||||
acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
|
||||
#endif
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_enter_sleep_state_s4bios
|
||||
*
|
||||
* PARAMETERS: None
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Perform a S4 bios request.
|
||||
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
|
||||
{
|
||||
u32 in_value;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
|
||||
|
||||
/* Clear the wake status bit (PM1) */
|
||||
|
||||
status =
|
||||
acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
status = acpi_hw_clear_acpi_status();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*
|
||||
* 1) Disable/Clear all GPEs
|
||||
* 2) Enable all wakeup GPEs
|
||||
*/
|
||||
status = acpi_hw_disable_all_gpes();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
acpi_gbl_system_awake_and_running = FALSE;
|
||||
|
||||
status = acpi_hw_enable_all_wakeup_gpes();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
||||
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
|
||||
(u32)acpi_gbl_FADT.S4bios_request, 8);
|
||||
|
||||
do {
|
||||
acpi_os_stall(1000);
|
||||
status =
|
||||
acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
} while (!in_value);
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_sleep_dispatch
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter/exit
 *              function_id         - Sleep, wake_prep, or Wake
 *
 * RETURN:      Status from the invoked sleep handling function.
 *
 * DESCRIPTION: Dispatch a sleep/wake request to the appropriate handling
 *              function.
 *
 ******************************************************************************/
static acpi_status
acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
{
        acpi_status status;
        struct acpi_sleep_functions *sleep_functions =
            &acpi_sleep_dispatch[function_id];

#if (!ACPI_REDUCED_HARDWARE)

        /*
         * If the Hardware Reduced flag is set (from the FADT), we must
         * use the extended sleep registers
         */
        if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
                status = sleep_functions->extended_function(sleep_state, flags);
        } else {
                /* Legacy sleep */

                status = sleep_functions->legacy_function(sleep_state, flags);
        }

        return (status);

#else
        /*
         * For the case where reduced-hardware-only code is being generated,
         * we know that only the extended sleep registers are available
         */
        status = sleep_functions->extended_function(sleep_state, flags);
        return (status);

#endif /* !ACPI_REDUCED_HARDWARE */
}
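
For illustration only, not part of the commit: the register-set selection above reduces to a single predicate, so a hypothetical helper reporting which path the dispatcher would take on the current platform could be sketched as:

/* Illustrative sketch only; not part of this patch */
static u8 example_uses_extended_registers(void)
{
        /* Extended path when the FADT reports reduced hardware or provides
         * the V5 sleep control register; legacy PM1 registers otherwise */
        return (acpi_gbl_reduced_hardware ||
                acpi_gbl_FADT.sleep_control.address) ? TRUE : FALSE;
}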
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_enter_sleep_state_prep
|
||||
*
|
||||
* PARAMETERS: sleep_state - Which sleep state to enter
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Prepare to enter a system sleep state.
|
||||
* This function must execute with interrupts enabled.
|
||||
* We break sleeping into 2 stages so that OSPM can handle
|
||||
* various OS-specific tasks between the two steps.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
|
||||
{
|
||||
acpi_status status;
|
||||
struct acpi_object_list arg_list;
|
||||
union acpi_object arg;
|
||||
u32 sst_value;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
|
||||
|
||||
status = acpi_get_sleep_type_data(sleep_state,
|
||||
&acpi_gbl_sleep_type_a,
|
||||
&acpi_gbl_sleep_type_b);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Execute the _PTS method (Prepare To Sleep) */
|
||||
|
||||
arg_list.count = 1;
|
||||
arg_list.pointer = &arg;
|
||||
arg.type = ACPI_TYPE_INTEGER;
|
||||
arg.integer.value = sleep_state;
|
||||
|
||||
status =
|
||||
acpi_evaluate_object(NULL, METHOD_PATHNAME__PTS, &arg_list, NULL);
|
||||
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Setup the argument to the _SST method (System STatus) */
|
||||
|
||||
switch (sleep_state) {
|
||||
case ACPI_STATE_S0:
|
||||
sst_value = ACPI_SST_WORKING;
|
||||
break;
|
||||
|
||||
case ACPI_STATE_S1:
|
||||
case ACPI_STATE_S2:
|
||||
case ACPI_STATE_S3:
|
||||
sst_value = ACPI_SST_SLEEPING;
|
||||
break;
|
||||
|
||||
case ACPI_STATE_S4:
|
||||
sst_value = ACPI_SST_SLEEP_CONTEXT;
|
||||
break;
|
||||
|
||||
default:
|
||||
sst_value = ACPI_SST_INDICATOR_OFF; /* Default is off */
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the system indicators to show the desired sleep state.
|
||||
* _SST is an optional method (return no error if not found)
|
||||
*/
|
||||
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, sst_value);
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_enter_sleep_state
|
||||
*
|
||||
* PARAMETERS: sleep_state - Which sleep state to enter
|
||||
* Flags - ACPI_EXECUTE_GTS to run optional method
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
|
||||
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
|
||||
|
||||
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
|
||||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
|
||||
ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
|
||||
acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
|
||||
}
|
||||
|
||||
status =
|
||||
acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_leave_sleep_state_prep
|
||||
*
|
||||
* PARAMETERS: sleep_state - Which sleep state we are exiting
|
||||
* Flags - ACPI_EXECUTE_BFS to run optional method
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
|
||||
* sleep.
|
||||
* Called with interrupts DISABLED.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
|
||||
|
||||
status =
|
||||
acpi_hw_sleep_dispatch(sleep_state, flags,
|
||||
ACPI_WAKE_PREP_FUNCTION_ID);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state_prep)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_leave_sleep_state
|
||||
*
|
||||
* PARAMETERS: sleep_state - Which sleep state we are exiting
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
|
||||
* Called with interrupts ENABLED.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_leave_sleep_state(u8 sleep_state)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
|
||||
|
||||
|
||||
status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
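
For illustration only, not part of the commit: taken together, the interface descriptions above imply the following calling sequence for a host OS suspend/resume cycle. This is a sketch; error handling is reduced and the host is assumed to disable interrupts around the actual entry call.

/* Illustrative sketch only; not part of this patch */
static acpi_status example_s3_suspend_cycle(void)
{
        acpi_status status;

        /* Stage 1: runs with interrupts enabled (_PTS, _SST) */
        status = acpi_enter_sleep_state_prep(ACPI_STATE_S3);
        if (ACPI_FAILURE(status))
                return (status);

        /* Stage 2: must be called with interrupts disabled */
        status = acpi_enter_sleep_state(ACPI_STATE_S3, ACPI_EXECUTE_GTS);

        /* Execution resumes here after wakeup */
        (void)acpi_leave_sleep_state_prep(ACPI_STATE_S3, ACPI_EXECUTE_BFS);
        (void)acpi_leave_sleep_state(ACPI_STATE_S3);
        return (status);
}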
|
|
@ -242,7 +242,20 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
|
|||
|
||||
if (!obj_desc) {
|
||||
|
||||
/* No attached object, we are done */
|
||||
/* No attached object. Some types should always have an object */
|
||||
|
||||
switch (type) {
|
||||
case ACPI_TYPE_INTEGER:
|
||||
case ACPI_TYPE_PACKAGE:
|
||||
case ACPI_TYPE_BUFFER:
|
||||
case ACPI_TYPE_STRING:
|
||||
case ACPI_TYPE_METHOD:
|
||||
acpi_os_printf("<No attached object>");
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
acpi_os_printf("\n");
|
||||
return (AE_OK);
|
||||
|
|
|
@ -121,7 +121,7 @@ void acpi_ns_dump_root_devices(void)
|
|||
return;
|
||||
}
|
||||
|
||||
status = acpi_get_handle(NULL, ACPI_NS_SYSTEM_BUS, &sys_bus_handle);
|
||||
status = acpi_get_handle(NULL, METHOD_NAME__SB_, &sys_bus_handle);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -638,8 +638,8 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
|
|||
/* Create the new outer package and populate it */
|
||||
|
||||
status =
|
||||
acpi_ns_repair_package_list(data,
|
||||
return_object_ptr);
|
||||
acpi_ns_wrap_with_package(data, *elements,
|
||||
return_object_ptr);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return (status);
|
||||
}
|
||||
|
|
|
@ -71,11 +71,10 @@ ACPI_MODULE_NAME("nsrepair")
|
|||
* Buffer -> String
|
||||
* Buffer -> Package of Integers
|
||||
* Package -> Package of one Package
|
||||
* An incorrect standalone object is wrapped with required outer package
|
||||
*
|
||||
* Additional possible repairs:
|
||||
*
|
||||
* Required package elements that are NULL replaced by Integer/String/Buffer
|
||||
* Incorrect standalone package wrapped with required outer package
|
||||
*
|
||||
******************************************************************************/
|
||||
/* Local prototypes */
|
||||
|
@ -91,10 +90,6 @@ static acpi_status
|
|||
acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
|
||||
union acpi_operand_object **return_object);
|
||||
|
||||
static acpi_status
|
||||
acpi_ns_convert_to_package(union acpi_operand_object *original_object,
|
||||
union acpi_operand_object **return_object);
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ns_repair_object
|
||||
|
@ -151,9 +146,24 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
|
|||
}
|
||||
}
|
||||
if (expected_btypes & ACPI_RTYPE_PACKAGE) {
|
||||
status = acpi_ns_convert_to_package(return_object, &new_object);
|
||||
/*
|
||||
* A package is expected. We will wrap the existing object with a
|
||||
* new package object. It is often the case that if a variable-length
|
||||
* package is required, but there is only a single object needed, the
|
||||
* BIOS will return that object instead of wrapping it with a Package
|
||||
* object. Note: after the wrapping, the package will be validated
|
||||
* for correct contents (expected object type or types).
|
||||
*/
|
||||
status =
|
||||
acpi_ns_wrap_with_package(data, return_object, &new_object);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
goto object_repaired;
|
||||
/*
|
||||
* The original object just had its reference count
|
||||
* incremented for being inserted into the new package.
|
||||
*/
|
||||
*return_object_ptr = new_object; /* New Package object */
|
||||
data->flags |= ACPI_OBJECT_REPAIRED;
|
||||
return (AE_OK);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -165,22 +175,27 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,

/* Object was successfully repaired */

/*
* If the original object is a package element, we need to:
* 1. Set the reference count of the new object to match the
* reference count of the old object.
* 2. Decrement the reference count of the original object.
*/
if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
new_object->common.reference_count =
return_object->common.reference_count;
/*
* The original object is a package element. We need to
* decrement the reference count of the original object,
* for removing it from the package.
*
* However, if the original object was just wrapped with a
* package object as part of the repair, we don't need to
* change the reference count.
*/
if (!(data->flags & ACPI_OBJECT_WRAPPED)) {
new_object->common.reference_count =
return_object->common.reference_count;

if (return_object->common.reference_count > 1) {
return_object->common.reference_count--;
if (return_object->common.reference_count > 1) {
return_object->common.reference_count--;
}
}

ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
"%s: Converted %s to expected %s at index %u\n",
"%s: Converted %s to expected %s at Package index %u\n",
data->pathname,
acpi_ut_get_object_type_name(return_object),
acpi_ut_get_object_type_name(new_object),
@ -451,65 +466,6 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
|
|||
return (AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ns_convert_to_package
|
||||
*
|
||||
* PARAMETERS: original_object - Object to be converted
|
||||
* return_object - Where the new converted object is returned
|
||||
*
|
||||
* RETURN: Status. AE_OK if conversion was successful.
|
||||
*
|
||||
* DESCRIPTION: Attempt to convert a Buffer object to a Package. Each byte of
|
||||
* the buffer is converted to a single integer package element.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static acpi_status
|
||||
acpi_ns_convert_to_package(union acpi_operand_object *original_object,
|
||||
union acpi_operand_object **return_object)
|
||||
{
|
||||
union acpi_operand_object *new_object;
|
||||
union acpi_operand_object **elements;
|
||||
u32 length;
|
||||
u8 *buffer;
|
||||
|
||||
switch (original_object->common.type) {
|
||||
case ACPI_TYPE_BUFFER:
|
||||
|
||||
/* Buffer-to-Package conversion */
|
||||
|
||||
length = original_object->buffer.length;
|
||||
new_object = acpi_ut_create_package_object(length);
|
||||
if (!new_object) {
|
||||
return (AE_NO_MEMORY);
|
||||
}
|
||||
|
||||
/* Convert each buffer byte to an integer package element */
|
||||
|
||||
elements = new_object->package.elements;
|
||||
buffer = original_object->buffer.pointer;
|
||||
|
||||
while (length--) {
|
||||
*elements =
|
||||
acpi_ut_create_integer_object((u64) *buffer);
|
||||
if (!*elements) {
|
||||
acpi_ut_remove_reference(new_object);
|
||||
return (AE_NO_MEMORY);
|
||||
}
|
||||
elements++;
|
||||
buffer++;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
return (AE_AML_OPERAND_TYPE);
|
||||
}
|
||||
|
||||
*return_object = new_object;
|
||||
return (AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ns_repair_null_element
|
||||
|
@ -677,55 +633,56 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
|
|||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ns_repair_package_list
|
||||
* FUNCTION: acpi_ns_wrap_with_package
|
||||
*
|
||||
* PARAMETERS: Data - Pointer to validation data structure
|
||||
* obj_desc_ptr - Pointer to the object to repair. The new
|
||||
* package object is returned here,
|
||||
* overwriting the old object.
|
||||
* original_object - Pointer to the object to repair.
|
||||
* obj_desc_ptr - The new package object is returned here
|
||||
*
|
||||
* RETURN: Status, new object in *obj_desc_ptr
|
||||
*
|
||||
* DESCRIPTION: Repair a common problem with objects that are defined to return
|
||||
* a variable-length Package of Packages. If the variable-length
|
||||
* is one, some BIOS code mistakenly simply declares a single
|
||||
* Package instead of a Package with one sub-Package. This
|
||||
* function attempts to repair this error by wrapping a Package
|
||||
* object around the original Package, creating the correct
|
||||
* Package with one sub-Package.
|
||||
* DESCRIPTION: Repair a common problem with objects that are defined to
|
||||
* return a variable-length Package of sub-objects. If there is
|
||||
* only one sub-object, some BIOS code mistakenly simply declares
|
||||
* the single object instead of a Package with one sub-object.
|
||||
* This function attempts to repair this error by wrapping a
|
||||
* Package object around the original object, creating the
|
||||
* correct and expected Package with one sub-object.
|
||||
*
|
||||
* Names that can be repaired in this manner include:
|
||||
* _ALR, _CSD, _HPX, _MLS, _PRT, _PSS, _TRT, TSS
|
||||
* _ALR, _CSD, _HPX, _MLS, _PLD, _PRT, _PSS, _TRT, _TSS,
|
||||
* _BCL, _DOD, _FIX, _Sx
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ns_repair_package_list(struct acpi_predefined_data *data,
|
||||
union acpi_operand_object **obj_desc_ptr)
|
||||
acpi_ns_wrap_with_package(struct acpi_predefined_data *data,
|
||||
union acpi_operand_object *original_object,
|
||||
union acpi_operand_object **obj_desc_ptr)
|
||||
{
|
||||
union acpi_operand_object *pkg_obj_desc;
|
||||
|
||||
ACPI_FUNCTION_NAME(ns_repair_package_list);
|
||||
ACPI_FUNCTION_NAME(ns_wrap_with_package);
|
||||
|
||||
/*
|
||||
* Create the new outer package and populate it. The new package will
|
||||
* have a single element, the lone subpackage.
|
||||
* have a single element, the lone sub-object.
|
||||
*/
|
||||
pkg_obj_desc = acpi_ut_create_package_object(1);
|
||||
if (!pkg_obj_desc) {
|
||||
return (AE_NO_MEMORY);
|
||||
}
|
||||
|
||||
pkg_obj_desc->package.elements[0] = *obj_desc_ptr;
|
||||
pkg_obj_desc->package.elements[0] = original_object;
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
|
||||
"%s: Wrapped %s with expected Package object\n",
|
||||
data->pathname,
|
||||
acpi_ut_get_object_type_name(original_object)));
|
||||
|
||||
/* Return the new object in the object pointer */
|
||||
|
||||
*obj_desc_ptr = pkg_obj_desc;
|
||||
data->flags |= ACPI_OBJECT_REPAIRED;
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
|
||||
"%s: Repaired incorrectly formed Package\n",
|
||||
data->pathname));
|
||||
|
||||
data->flags |= ACPI_OBJECT_REPAIRED | ACPI_OBJECT_WRAPPED;
|
||||
return (AE_OK);
|
||||
}
|
||||
|
|
|
@@ -341,7 +341,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)

if (!acpi_ns_valid_path_separator(*external_name) &&
(*external_name != 0)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
return_ACPI_STATUS(AE_BAD_PATHNAME);
}

/* Move on the next segment */
@@ -363,10 +363,6 @@ static void acpi_tb_convert_fadt(void)
u32 address32;
u32 i;

/* Update the local FADT table header length */

acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);

/*
* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
* Later code will always use the X 64-bit field. Also, check for an

@@ -408,6 +404,10 @@ static void acpi_tb_convert_fadt(void)
acpi_gbl_FADT.boot_flags = 0;
}

/* Update the local FADT table header length */

acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);

/*
* Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
* generic address structures as necessary. Later code will always use
@ -114,7 +114,6 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
|
|||
{
|
||||
u32 i;
|
||||
acpi_status status = AE_OK;
|
||||
struct acpi_table_header *override_table = NULL;
|
||||
|
||||
ACPI_FUNCTION_TRACE(tb_add_table);
|
||||
|
||||
|
@ -224,25 +223,10 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
|
|||
/*
|
||||
* ACPI Table Override:
|
||||
* Allow the host to override dynamically loaded tables.
|
||||
* NOTE: the table is fully mapped at this point, and the mapping will
|
||||
* be deleted by tb_table_override if the table is actually overridden.
|
||||
*/
|
||||
status = acpi_os_table_override(table_desc->pointer, &override_table);
|
||||
if (ACPI_SUCCESS(status) && override_table) {
|
||||
ACPI_INFO((AE_INFO,
|
||||
"%4.4s @ 0x%p Table override, replaced with:",
|
||||
table_desc->pointer->signature,
|
||||
ACPI_CAST_PTR(void, table_desc->address)));
|
||||
|
||||
/* We can delete the table that was passed as a parameter */
|
||||
|
||||
acpi_tb_delete_table(table_desc);
|
||||
|
||||
/* Setup descriptor for the new table */
|
||||
|
||||
table_desc->address = ACPI_PTR_TO_PHYSADDR(override_table);
|
||||
table_desc->pointer = override_table;
|
||||
table_desc->length = override_table->length;
|
||||
table_desc->flags = ACPI_TABLE_ORIGIN_OVERRIDE;
|
||||
}
|
||||
(void)acpi_tb_table_override(table_desc->pointer, table_desc);
|
||||
|
||||
/* Add the table to the global root table list */
|
||||
|
||||
|
@ -261,6 +245,95 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_tb_table_override
|
||||
*
|
||||
* PARAMETERS: table_header - Header for the original table
|
||||
* table_desc - Table descriptor initialized for the
|
||||
* original table. May or may not be mapped.
|
||||
*
|
||||
* RETURN: Pointer to the entire new table. NULL if table not overridden.
|
||||
* If overridden, installs the new table within the input table
|
||||
* descriptor.
|
||||
*
|
||||
* DESCRIPTION: Attempt table override by calling the OSL override functions.
|
||||
* Note: If the table is overridden, then the entire new table
|
||||
* is mapped and returned by this function.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
|
||||
*table_header,
|
||||
struct acpi_table_desc
|
||||
*table_desc)
|
||||
{
|
||||
acpi_status status;
|
||||
struct acpi_table_header *new_table = NULL;
|
||||
acpi_physical_address new_address = 0;
|
||||
u32 new_table_length = 0;
|
||||
u8 new_flags;
|
||||
char *override_type;
|
||||
|
||||
/* (1) Attempt logical override (returns a logical address) */
|
||||
|
||||
status = acpi_os_table_override(table_header, &new_table);
|
||||
if (ACPI_SUCCESS(status) && new_table) {
|
||||
new_address = ACPI_PTR_TO_PHYSADDR(new_table);
|
||||
new_table_length = new_table->length;
|
||||
new_flags = ACPI_TABLE_ORIGIN_OVERRIDE;
|
||||
override_type = "Logical";
|
||||
goto finish_override;
|
||||
}
|
||||
|
||||
/* (2) Attempt physical override (returns a physical address) */
|
||||
|
||||
status = acpi_os_physical_table_override(table_header,
|
||||
&new_address,
|
||||
&new_table_length);
|
||||
if (ACPI_SUCCESS(status) && new_address && new_table_length) {
|
||||
|
||||
/* Map the entire new table */
|
||||
|
||||
new_table = acpi_os_map_memory(new_address, new_table_length);
|
||||
if (!new_table) {
|
||||
ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
|
||||
"%4.4s %p Attempted physical table override failed",
|
||||
table_header->signature,
|
||||
ACPI_CAST_PTR(void,
|
||||
table_desc->address)));
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
override_type = "Physical";
|
||||
new_flags = ACPI_TABLE_ORIGIN_MAPPED;
|
||||
goto finish_override;
|
||||
}
|
||||
|
||||
return (NULL); /* There was no override */
|
||||
|
||||
finish_override:
|
||||
|
||||
ACPI_INFO((AE_INFO,
|
||||
"%4.4s %p %s table override, new table: %p",
|
||||
table_header->signature,
|
||||
ACPI_CAST_PTR(void, table_desc->address),
|
||||
override_type, new_table));
|
||||
|
||||
/* We can now unmap/delete the original table (if fully mapped) */
|
||||
|
||||
acpi_tb_delete_table(table_desc);
|
||||
|
||||
/* Setup descriptor for the new table */
|
||||
|
||||
table_desc->address = new_address;
|
||||
table_desc->pointer = new_table;
|
||||
table_desc->length = new_table_length;
|
||||
table_desc->flags = new_flags;
|
||||
|
||||
return (new_table);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_tb_resize_root_table_list
|
||||
|
@@ -396,7 +469,11 @@ void acpi_tb_delete_table(struct acpi_table_desc *table_desc)
case ACPI_TABLE_ORIGIN_ALLOCATED:
ACPI_FREE(table_desc->pointer);
break;
default:;

/* Not mapped or allocated, there is nothing we can do */

default:
return;
}

table_desc->pointer = NULL;
@@ -118,6 +118,7 @@ acpi_tb_check_xsdt(acpi_physical_address address)
return AE_OK;
}

#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_tb_initialize_facs

@@ -148,6 +149,7 @@ acpi_status acpi_tb_initialize_facs(void)
&acpi_gbl_FACS));
return status;
}
#endif /* !ACPI_REDUCED_HARDWARE */

/*******************************************************************************
*
@@ -444,7 +446,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
* RETURN: None
*
* DESCRIPTION: Install an ACPI table into the global data structure. The
* table override mechanism is implemented here to allow the host
* table override mechanism is called to allow the host
* OS to replace any table before it is installed in the root
* table array.
*
@ -454,11 +456,9 @@ void
|
|||
acpi_tb_install_table(acpi_physical_address address,
|
||||
char *signature, u32 table_index)
|
||||
{
|
||||
u8 flags;
|
||||
acpi_status status;
|
||||
struct acpi_table_header *table_to_install;
|
||||
struct acpi_table_header *mapped_table;
|
||||
struct acpi_table_header *override_table = NULL;
|
||||
struct acpi_table_header *table;
|
||||
struct acpi_table_header *final_table;
|
||||
struct acpi_table_desc *table_desc;
|
||||
|
||||
if (!address) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
|
@ -469,69 +469,78 @@ acpi_tb_install_table(acpi_physical_address address,
|
|||
|
||||
/* Map just the table header */
|
||||
|
||||
mapped_table =
|
||||
acpi_os_map_memory(address, sizeof(struct acpi_table_header));
|
||||
if (!mapped_table) {
|
||||
table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
|
||||
if (!table) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not map memory for table [%s] at %p",
|
||||
signature, ACPI_CAST_PTR(void, address)));
|
||||
return;
|
||||
}
|
||||
|
||||
/* If a particular signature is expected (DSDT/FACS), it must match */
|
||||
|
||||
if (signature && !ACPI_COMPARE_NAME(mapped_table->signature, signature)) {
|
||||
if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Invalid signature 0x%X for ACPI table, expected [%s]",
|
||||
*ACPI_CAST_PTR(u32, mapped_table->signature),
|
||||
signature));
|
||||
*ACPI_CAST_PTR(u32, table->signature), signature));
|
||||
goto unmap_and_exit;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the table entry. Set the pointer to NULL, since the
|
||||
* table is not fully mapped at this time.
|
||||
*/
|
||||
table_desc = &acpi_gbl_root_table_list.tables[table_index];
|
||||
|
||||
table_desc->address = address;
|
||||
table_desc->pointer = NULL;
|
||||
table_desc->length = table->length;
|
||||
table_desc->flags = ACPI_TABLE_ORIGIN_MAPPED;
|
||||
ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature);
|
||||
|
||||
/*
|
||||
* ACPI Table Override:
|
||||
*
|
||||
* Before we install the table, let the host OS override it with a new
|
||||
* one if desired. Any table within the RSDT/XSDT can be replaced,
|
||||
* including the DSDT which is pointed to by the FADT.
|
||||
*
|
||||
* NOTE: If the table is overridden, then final_table will contain a
|
||||
* mapped pointer to the full new table. If the table is not overridden,
|
||||
* or if there has been a physical override, then the table will be
|
||||
* fully mapped later (in verify table). In any case, we must
|
||||
* unmap the header that was mapped above.
|
||||
*/
|
||||
status = acpi_os_table_override(mapped_table, &override_table);
|
||||
if (ACPI_SUCCESS(status) && override_table) {
|
||||
ACPI_INFO((AE_INFO,
|
||||
"%4.4s @ 0x%p Table override, replaced with:",
|
||||
mapped_table->signature, ACPI_CAST_PTR(void,
|
||||
address)));
|
||||
|
||||
acpi_gbl_root_table_list.tables[table_index].pointer =
|
||||
override_table;
|
||||
address = ACPI_PTR_TO_PHYSADDR(override_table);
|
||||
|
||||
table_to_install = override_table;
|
||||
flags = ACPI_TABLE_ORIGIN_OVERRIDE;
|
||||
} else {
|
||||
table_to_install = mapped_table;
|
||||
flags = ACPI_TABLE_ORIGIN_MAPPED;
|
||||
final_table = acpi_tb_table_override(table, table_desc);
|
||||
if (!final_table) {
|
||||
final_table = table; /* There was no override */
|
||||
}
|
||||
|
||||
/* Initialize the table entry */
|
||||
acpi_tb_print_table_header(table_desc->address, final_table);
|
||||
|
||||
acpi_gbl_root_table_list.tables[table_index].address = address;
|
||||
acpi_gbl_root_table_list.tables[table_index].length =
|
||||
table_to_install->length;
|
||||
acpi_gbl_root_table_list.tables[table_index].flags = flags;
|
||||
|
||||
ACPI_MOVE_32_TO_32(&
|
||||
(acpi_gbl_root_table_list.tables[table_index].
|
||||
signature), table_to_install->signature);
|
||||
|
||||
acpi_tb_print_table_header(address, table_to_install);
|
||||
/* Set the global integer width (based upon revision of the DSDT) */
|
||||
|
||||
if (table_index == ACPI_TABLE_INDEX_DSDT) {
|
||||
acpi_ut_set_integer_width(final_table->revision);
|
||||
}
|
||||
|
||||
/* Global integer width is based upon revision of the DSDT */
|
||||
|
||||
acpi_ut_set_integer_width(table_to_install->revision);
|
||||
/*
|
||||
* If we have a physical override during this early loading of the ACPI
|
||||
* tables, unmap the table for now. It will be mapped again later when
|
||||
* it is actually used. This supports very early loading of ACPI tables,
|
||||
* before virtual memory is fully initialized and running within the
|
||||
* host OS. Note: A logical override has the ACPI_TABLE_ORIGIN_OVERRIDE
|
||||
* flag set and will not be deleted below.
|
||||
*/
|
||||
if (final_table != table) {
|
||||
acpi_tb_delete_table(table_desc);
|
||||
}
|
||||
|
||||
unmap_and_exit:
|
||||
acpi_os_unmap_memory(mapped_table, sizeof(struct acpi_table_header));
|
||||
|
||||
/* Always unmap the table header that we mapped above */
|
||||
|
||||
acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
|
|
|
@ -497,19 +497,20 @@ char *acpi_ut_get_mutex_name(u32 mutex_id)
|
|||
|
||||
/* Names for Notify() values, used for debug output */
|
||||
|
||||
static const char *acpi_gbl_notify_value_names[] = {
|
||||
"Bus Check",
|
||||
"Device Check",
|
||||
"Device Wake",
|
||||
"Eject Request",
|
||||
"Device Check Light",
|
||||
"Frequency Mismatch",
|
||||
"Bus Mode Mismatch",
|
||||
"Power Fault",
|
||||
"Capabilities Check",
|
||||
"Device PLD Check",
|
||||
"Reserved",
|
||||
"System Locality Update"
|
||||
static const char *acpi_gbl_notify_value_names[ACPI_NOTIFY_MAX + 1] = {
|
||||
/* 00 */ "Bus Check",
|
||||
/* 01 */ "Device Check",
|
||||
/* 02 */ "Device Wake",
|
||||
/* 03 */ "Eject Request",
|
||||
/* 04 */ "Device Check Light",
|
||||
/* 05 */ "Frequency Mismatch",
|
||||
/* 06 */ "Bus Mode Mismatch",
|
||||
/* 07 */ "Power Fault",
|
||||
/* 08 */ "Capabilities Check",
|
||||
/* 09 */ "Device PLD Check",
|
||||
/* 10 */ "Reserved",
|
||||
/* 11 */ "System Locality Update",
|
||||
/* 12 */ "Shutdown Request"
|
||||
};
|
||||
|
||||
const char *acpi_ut_get_notify_name(u32 notify_value)
|
||||
|
@ -519,9 +520,10 @@ const char *acpi_ut_get_notify_name(u32 notify_value)
|
|||
return (acpi_gbl_notify_value_names[notify_value]);
|
||||
} else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
|
||||
return ("Reserved");
|
||||
} else { /* Greater or equal to 0x80 */
|
||||
|
||||
return ("**Device Specific**");
|
||||
} else if (notify_value <= ACPI_MAX_DEVICE_SPECIFIC_NOTIFY) {
|
||||
return ("Device Specific");
|
||||
} else {
|
||||
return ("Hardware Specific");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -140,6 +140,7 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
|
|||
{NULL, ACPI_TYPE_ANY, NULL}
|
||||
};
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
/******************************************************************************
|
||||
*
|
||||
* Event and Hardware globals
|
||||
|
@ -236,6 +237,7 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
|
|||
ACPI_BITMASK_RT_CLOCK_STATUS,
|
||||
ACPI_BITMASK_RT_CLOCK_ENABLE},
|
||||
};
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
|
@ -286,6 +288,8 @@ acpi_status acpi_ut_init_globals(void)
|
|||
|
||||
acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
|
||||
/* GPE support */
|
||||
|
||||
acpi_gbl_gpe_xrupt_list_head = NULL;
|
||||
|
@ -294,6 +298,10 @@ acpi_status acpi_ut_init_globals(void)
|
|||
acpi_current_gpe_count = 0;
|
||||
acpi_gbl_all_gpes_initialized = FALSE;
|
||||
|
||||
acpi_gbl_global_event_handler = NULL;
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
/* Global handlers */
|
||||
|
||||
acpi_gbl_system_notify.handler = NULL;
|
||||
|
@ -302,7 +310,6 @@ acpi_status acpi_ut_init_globals(void)
|
|||
acpi_gbl_init_handler = NULL;
|
||||
acpi_gbl_table_handler = NULL;
|
||||
acpi_gbl_interface_handler = NULL;
|
||||
acpi_gbl_global_event_handler = NULL;
|
||||
|
||||
/* Global Lock support */
|
||||
|
||||
|
|
|
@ -53,27 +53,35 @@ ACPI_MODULE_NAME("utinit")
|
|||
/* Local prototypes */
|
||||
static void acpi_ut_terminate(void);
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
|
||||
static void acpi_ut_free_gpe_lists(void);
|
||||
|
||||
#else
|
||||
|
||||
#define acpi_ut_free_gpe_lists()
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
/******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ut_terminate
|
||||
* FUNCTION: acpi_ut_free_gpe_lists
|
||||
*
|
||||
* PARAMETERS: none
|
||||
*
|
||||
* RETURN: none
|
||||
*
|
||||
* DESCRIPTION: Free global memory
|
||||
* DESCRIPTION: Free global GPE lists
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static void acpi_ut_terminate(void)
|
||||
static void acpi_ut_free_gpe_lists(void)
|
||||
{
|
||||
struct acpi_gpe_block_info *gpe_block;
|
||||
struct acpi_gpe_block_info *next_gpe_block;
|
||||
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
|
||||
struct acpi_gpe_xrupt_info *next_gpe_xrupt_info;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ut_terminate);
|
||||
|
||||
/* Free global GPE blocks and related info structures */
|
||||
|
||||
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
|
||||
|
@ -91,7 +99,26 @@ static void acpi_ut_terminate(void)
|
|||
ACPI_FREE(gpe_xrupt_info);
|
||||
gpe_xrupt_info = next_gpe_xrupt_info;
|
||||
}
|
||||
}
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ut_terminate
|
||||
*
|
||||
* PARAMETERS: none
|
||||
*
|
||||
* RETURN: none
|
||||
*
|
||||
* DESCRIPTION: Free global memory
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static void acpi_ut_terminate(void)
|
||||
{
|
||||
ACPI_FUNCTION_TRACE(ut_terminate);
|
||||
|
||||
acpi_ut_free_gpe_lists();
|
||||
acpi_ut_delete_address_lists();
|
||||
return_VOID;
|
||||
}
|
||||
|
|
|
@ -145,6 +145,8 @@ acpi_status acpi_enable_subsystem(u32 flags)
|
|||
|
||||
ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
|
||||
/* Enable ACPI mode */
|
||||
|
||||
if (!(flags & ACPI_NO_ACPI_ENABLE)) {
|
||||
|
@ -169,6 +171,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
|
|||
ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
/*
|
||||
* Install the default op_region handlers. These are installed unless
|
||||
|
@ -184,7 +187,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
}
|
||||
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
/*
|
||||
* Initialize ACPI Event handling (Fixed and General Purpose)
|
||||
*
|
||||
|
@ -220,6 +223,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
}
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
|
|
@@ -558,33 +558,48 @@ void apei_resources_release(struct apei_resources *resources)
}
EXPORT_SYMBOL_GPL(apei_resources_release);

static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
u32 *access_bit_width)
{
u32 width, space_id;
u32 bit_width, bit_offset, access_size_code, space_id;

width = reg->bit_width;
bit_width = reg->bit_width;
bit_offset = reg->bit_offset;
access_size_code = reg->access_width;
space_id = reg->space_id;
/* Handle possible alignment issues */
memcpy(paddr, &reg->address, sizeof(*paddr));
if (!*paddr) {
pr_warning(FW_BUG APEI_PFX
"Invalid physical address in GAR [0x%llx/%u/%u]\n",
*paddr, width, space_id);
"Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
*paddr, bit_width, bit_offset, access_size_code,
space_id);
return -EINVAL;
}

if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
if (access_size_code < 1 || access_size_code > 4) {
pr_warning(FW_BUG APEI_PFX
"Invalid bit width in GAR [0x%llx/%u/%u]\n",
*paddr, width, space_id);
"Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
*paddr, bit_width, bit_offset, access_size_code,
space_id);
return -EINVAL;
}
*access_bit_width = 1UL << (access_size_code + 2);

if ((bit_width + bit_offset) > *access_bit_width) {
pr_warning(FW_BUG APEI_PFX
"Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
*paddr, bit_width, bit_offset, access_size_code,
space_id);
return -EINVAL;
}

if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
pr_warning(FW_BUG APEI_PFX
"Invalid address space type in GAR [0x%llx/%u/%u]\n",
*paddr, width, space_id);
"Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
*paddr, bit_width, bit_offset, access_size_code,
space_id);
return -EINVAL;
}
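As a side note, the access-width decoding in the hunk above is terse; the following is a minimal standalone sketch of the same mapping, assuming only the ACPI GAS access-size encoding (codes 1 through 4 meaning byte, word, dword and qword accesses). It is an editorial illustration, not part of the patch.

#include <stdint.h>

/* Sketch only: map an ACPI GAS access size code (1..4) to a bit width. */
static uint32_t gas_access_bit_width(uint32_t access_size_code)
{
	/* 1 -> 8, 2 -> 16, 3 -> 32, 4 -> 64 */
	return 1u << (access_size_code + 2);
}

With that mapping, the register's bit_width plus bit_offset has to fit inside one such access, which is exactly what the added range check enforces.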
@ -595,23 +610,25 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
|
|||
int apei_read(u64 *val, struct acpi_generic_address *reg)
|
||||
{
|
||||
int rc;
|
||||
u32 access_bit_width;
|
||||
u64 address;
|
||||
acpi_status status;
|
||||
|
||||
rc = apei_check_gar(reg, &address);
|
||||
rc = apei_check_gar(reg, &address, &access_bit_width);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
*val = 0;
|
||||
switch(reg->space_id) {
|
||||
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
|
||||
status = acpi_os_read_memory64((acpi_physical_address)
|
||||
address, val, reg->bit_width);
|
||||
status = acpi_os_read_memory((acpi_physical_address) address,
|
||||
val, access_bit_width);
|
||||
if (ACPI_FAILURE(status))
|
||||
return -EIO;
|
||||
break;
|
||||
case ACPI_ADR_SPACE_SYSTEM_IO:
|
||||
status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
|
||||
status = acpi_os_read_port(address, (u32 *)val,
|
||||
access_bit_width);
|
||||
if (ACPI_FAILURE(status))
|
||||
return -EIO;
|
||||
break;
|
||||
|
@ -627,22 +644,23 @@ EXPORT_SYMBOL_GPL(apei_read);
|
|||
int apei_write(u64 val, struct acpi_generic_address *reg)
|
||||
{
|
||||
int rc;
|
||||
u32 access_bit_width;
|
||||
u64 address;
|
||||
acpi_status status;
|
||||
|
||||
rc = apei_check_gar(reg, &address);
|
||||
rc = apei_check_gar(reg, &address, &access_bit_width);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
switch (reg->space_id) {
|
||||
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
|
||||
status = acpi_os_write_memory64((acpi_physical_address)
|
||||
address, val, reg->bit_width);
|
||||
status = acpi_os_write_memory((acpi_physical_address) address,
|
||||
val, access_bit_width);
|
||||
if (ACPI_FAILURE(status))
|
||||
return -EIO;
|
||||
break;
|
||||
case ACPI_ADR_SPACE_SYSTEM_IO:
|
||||
status = acpi_os_write_port(address, val, reg->bit_width);
|
||||
status = acpi_os_write_port(address, val, access_bit_width);
|
||||
if (ACPI_FAILURE(status))
|
||||
return -EIO;
|
||||
break;
|
||||
|
@ -661,23 +679,24 @@ static int collect_res_callback(struct apei_exec_context *ctx,
|
|||
struct apei_resources *resources = data;
|
||||
struct acpi_generic_address *reg = &entry->register_region;
|
||||
u8 ins = entry->instruction;
|
||||
u32 access_bit_width;
|
||||
u64 paddr;
|
||||
int rc;
|
||||
|
||||
if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
|
||||
return 0;
|
||||
|
||||
rc = apei_check_gar(reg, &paddr);
|
||||
rc = apei_check_gar(reg, &paddr, &access_bit_width);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
switch (reg->space_id) {
|
||||
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
|
||||
return apei_res_add(&resources->iomem, paddr,
|
||||
reg->bit_width / 8);
|
||||
access_bit_width / 8);
|
||||
case ACPI_ADR_SPACE_SYSTEM_IO:
|
||||
return apei_res_add(&resources->ioport, paddr,
|
||||
reg->bit_width / 8);
|
||||
access_bit_width / 8);
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@@ -362,6 +362,7 @@ void apei_estatus_print(const char *pfx,
gedata_len = gdata->error_data_length;
apei_estatus_print_section(pfx, gdata, sec_no);
data_len -= gedata_len + sizeof(*gdata);
gdata = (void *)(gdata + 1) + gedata_len;
sec_no++;
}
}

@@ -396,6 +397,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
if (gedata_len > data_len - sizeof(*gdata))
return -EINVAL;
data_len -= gedata_len + sizeof(*gdata);
gdata = (void *)(gdata + 1) + gedata_len;
}
if (data_len)
return -EINVAL;
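For orientation, the two hunks above both rely on walking every generic data section of a generic error status block, advancing past each section header and its payload. Below is a condensed sketch of that walk as an illustration only; the ACPICA structure names and fields are real, but handle_section() is a hypothetical consumer standing in for the print/check logic.

#include <acpi/acpi.h>

static void handle_section(struct acpi_hest_generic_data *gdata); /* hypothetical */

/*
 * Sketch only: iterate over every generic data section; each section
 * header (acpi_hest_generic_data) is followed by its payload bytes.
 */
static void walk_estatus(struct acpi_hest_generic_status *estatus)
{
	struct acpi_hest_generic_data *gdata = (void *)(estatus + 1);
	u32 data_len = estatus->data_length;

	while (data_len >= sizeof(*gdata)) {
		u32 gedata_len = gdata->error_data_length;

		handle_section(gdata);
		data_len -= gedata_len + sizeof(*gdata);
		gdata = (void *)(gdata + 1) + gedata_len;
	}
}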
@@ -74,6 +74,8 @@ struct vendor_error_type_extension {
u8 reserved[3];
};

static u32 notrigger;

static u32 vendor_flags;
static struct debugfs_blob_wrapper vendor_blob;
static char vendor_dev[64];

@@ -238,7 +240,7 @@ static void *einj_get_parameter_address(void)
return v5param;
}
}
if (paddrv4) {
if (param_extension && paddrv4) {
struct einj_parameter *v4param;

v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param));

@@ -496,9 +498,11 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
if (rc)
return rc;
trigger_paddr = apei_exec_ctx_get_output(&ctx);
rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
if (rc)
return rc;
if (notrigger == 0) {
rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
if (rc)
return rc;
}
rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);

return rc;

@@ -700,6 +704,11 @@ static int __init einj_init(void)
einj_debug_dir, &error_param2);
if (!fentry)
goto err_unmap;

fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
einj_debug_dir, &notrigger);
if (!fentry)
goto err_unmap;
}

if (vendor_dev[0]) {
@@ -917,7 +917,7 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
{
if ((erst_tab->header_length !=
(sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
&& (erst_tab->header_length != sizeof(struct acpi_table_einj)))
&& (erst_tab->header_length != sizeof(struct acpi_table_erst)))
return -EINVAL;
if (erst_tab->header.length < sizeof(struct acpi_table_erst))
return -EINVAL;
@ -0,0 +1,175 @@
|
|||
/*
|
||||
* Copyright 2012 Red Hat, Inc <mjg@redhat.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <acpi/acpi.h>
|
||||
#include <acpi/acpi_bus.h>
|
||||
|
||||
static struct acpi_table_bgrt *bgrt_tab;
|
||||
static struct kobject *bgrt_kobj;
|
||||
|
||||
struct bmp_header {
|
||||
u16 id;
|
||||
u32 size;
|
||||
} __attribute ((packed));
|
||||
|
||||
static struct bmp_header bmp_header;
|
||||
|
||||
static ssize_t show_version(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->version);
|
||||
}
|
||||
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
|
||||
|
||||
static ssize_t show_status(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->status);
|
||||
}
|
||||
static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
|
||||
|
||||
static ssize_t show_type(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_type);
|
||||
}
|
||||
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
|
||||
|
||||
static ssize_t show_xoffset(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_x);
|
||||
}
|
||||
static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
|
||||
|
||||
static ssize_t show_yoffset(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_y);
|
||||
}
|
||||
static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
|
||||
|
||||
static ssize_t show_image(struct file *file, struct kobject *kobj,
|
||||
struct bin_attribute *attr, char *buf, loff_t off, size_t count)
|
||||
{
|
||||
int size = attr->size;
|
||||
void __iomem *image = attr->private;
|
||||
|
||||
if (off >= size) {
|
||||
count = 0;
|
||||
} else {
|
||||
if (off + count > size)
|
||||
count = size - off;
|
||||
|
||||
memcpy_fromio(buf, image+off, count);
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static struct bin_attribute image_attr = {
|
||||
.attr = {
|
||||
.name = "image",
|
||||
.mode = S_IRUGO,
|
||||
},
|
||||
.read = show_image,
|
||||
};
|
||||
|
||||
static struct attribute *bgrt_attributes[] = {
|
||||
&dev_attr_version.attr,
|
||||
&dev_attr_status.attr,
|
||||
&dev_attr_type.attr,
|
||||
&dev_attr_xoffset.attr,
|
||||
&dev_attr_yoffset.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group bgrt_attribute_group = {
|
||||
.attrs = bgrt_attributes,
|
||||
};
|
||||
|
||||
static int __init bgrt_init(void)
|
||||
{
|
||||
acpi_status status;
|
||||
int ret;
|
||||
void __iomem *bgrt;
|
||||
|
||||
if (acpi_disabled)
|
||||
return -ENODEV;
|
||||
|
||||
status = acpi_get_table("BGRT", 0,
|
||||
(struct acpi_table_header **)&bgrt_tab);
|
||||
|
||||
if (ACPI_FAILURE(status))
|
||||
return -ENODEV;
|
||||
|
||||
sysfs_bin_attr_init(&image_attr);
|
||||
|
||||
bgrt = ioremap(bgrt_tab->image_address, sizeof(struct bmp_header));
|
||||
|
||||
if (!bgrt) {
|
||||
ret = -EINVAL;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
memcpy_fromio(&bmp_header, bgrt, sizeof(bmp_header));
|
||||
image_attr.size = bmp_header.size;
|
||||
iounmap(bgrt);
|
||||
|
||||
image_attr.private = ioremap(bgrt_tab->image_address, image_attr.size);
|
||||
|
||||
if (!image_attr.private) {
|
||||
ret = -EINVAL;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
|
||||
bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
|
||||
if (!bgrt_kobj) {
|
||||
ret = -EINVAL;
|
||||
goto out_iounmap;
|
||||
}
|
||||
|
||||
ret = sysfs_create_group(bgrt_kobj, &bgrt_attribute_group);
|
||||
if (ret)
|
||||
goto out_kobject;
|
||||
|
||||
ret = sysfs_create_bin_file(bgrt_kobj, &image_attr);
|
||||
if (ret)
|
||||
goto out_group;
|
||||
|
||||
return 0;
|
||||
|
||||
out_group:
|
||||
sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group);
|
||||
out_kobject:
|
||||
kobject_put(bgrt_kobj);
|
||||
out_iounmap:
|
||||
iounmap(image_attr.private);
|
||||
out_err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit bgrt_exit(void)
|
||||
{
|
||||
iounmap(image_attr.private);
|
||||
sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group);
|
||||
sysfs_remove_bin_file(bgrt_kobj, &image_attr);
|
||||
}
|
||||
|
||||
module_init(bgrt_init);
|
||||
module_exit(bgrt_exit);
|
||||
|
||||
MODULE_AUTHOR("Matthew Garrett");
|
||||
MODULE_DESCRIPTION("BGRT boot graphic support");
|
||||
MODULE_LICENSE("GPL");
|
|
@@ -1010,6 +1010,7 @@ static int __init acpi_bus_init(void)
}

struct kobject *acpi_kobj;
EXPORT_SYMBOL_GPL(acpi_kobj);

static int __init acpi_init(void)
{
@@ -822,10 +822,10 @@ static int acpi_ec_add(struct acpi_device *device)
first_ec = ec;
device->driver_data = ec;

WARN(!request_region(ec->data_addr, 1, "EC data"),
"Could not request EC data io port 0x%lx", ec->data_addr);
WARN(!request_region(ec->command_addr, 1, "EC cmd"),
"Could not request EC cmd io port 0x%lx", ec->command_addr);
ret = !!request_region(ec->data_addr, 1, "EC data");
WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
ret = !!request_region(ec->command_addr, 1, "EC cmd");
WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
@@ -95,8 +95,8 @@ static int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;

pr_info("PM: Registering ACPI NVS region at %lx (%ld bytes)\n",
start, size);
pr_info("PM: Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n",
start, start + size - 1, size);

while (size > 0) {
unsigned int nr_bytes;
@ -77,6 +77,9 @@ EXPORT_SYMBOL(acpi_in_debugger);
|
|||
extern char line_buf[80];
|
||||
#endif /*ENABLE_DEBUGGER */
|
||||
|
||||
static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
|
||||
u32 pm1b_ctrl);
|
||||
|
||||
static acpi_osd_handler acpi_irq_handler;
|
||||
static void *acpi_irq_context;
|
||||
static struct workqueue_struct *kacpid_wq;
|
||||
|
@ -347,7 +350,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
|
|||
unsigned long pfn;
|
||||
|
||||
pfn = pg_off >> PAGE_SHIFT;
|
||||
if (page_is_ram(pfn))
|
||||
if (should_use_kmap(pfn))
|
||||
kunmap(pfn_to_page(pfn));
|
||||
else
|
||||
iounmap(vaddr);
|
||||
|
@ -554,6 +557,15 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
|
|||
return AE_OK;
|
||||
}
|
||||
|
||||
acpi_status
|
||||
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
|
||||
acpi_physical_address * new_address,
|
||||
u32 *new_table_length)
|
||||
{
|
||||
return AE_SUPPORT;
|
||||
}
|
||||
|
||||
|
||||
static irqreturn_t acpi_irq(int irq, void *dev_id)
|
||||
{
|
||||
u32 handled;
|
||||
|
@ -595,7 +607,8 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
|
|||
|
||||
acpi_irq_handler = handler;
|
||||
acpi_irq_context = context;
|
||||
if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
|
||||
if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED, "acpi",
|
||||
acpi_irq)) {
|
||||
printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
|
||||
acpi_irq_handler = NULL;
|
||||
return AE_NOT_ACQUIRED;
|
||||
|
@ -699,49 +712,6 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
|
|||
|
||||
EXPORT_SYMBOL(acpi_os_write_port);
|
||||
|
||||
acpi_status
|
||||
acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
|
||||
{
|
||||
void __iomem *virt_addr;
|
||||
unsigned int size = width / 8;
|
||||
bool unmap = false;
|
||||
u32 dummy;
|
||||
|
||||
rcu_read_lock();
|
||||
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
|
||||
if (!virt_addr) {
|
||||
rcu_read_unlock();
|
||||
virt_addr = acpi_os_ioremap(phys_addr, size);
|
||||
if (!virt_addr)
|
||||
return AE_BAD_ADDRESS;
|
||||
unmap = true;
|
||||
}
|
||||
|
||||
if (!value)
|
||||
value = &dummy;
|
||||
|
||||
switch (width) {
|
||||
case 8:
|
||||
*(u8 *) value = readb(virt_addr);
|
||||
break;
|
||||
case 16:
|
||||
*(u16 *) value = readw(virt_addr);
|
||||
break;
|
||||
case 32:
|
||||
*(u32 *) value = readl(virt_addr);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
if (unmap)
|
||||
iounmap(virt_addr);
|
||||
else
|
||||
rcu_read_unlock();
|
||||
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
#ifdef readq
|
||||
static inline u64 read64(const volatile void __iomem *addr)
|
||||
{
|
||||
|
@ -758,7 +728,7 @@ static inline u64 read64(const volatile void __iomem *addr)
|
|||
#endif
|
||||
|
||||
acpi_status
|
||||
acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width)
|
||||
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
|
||||
{
|
||||
void __iomem *virt_addr;
|
||||
unsigned int size = width / 8;
|
||||
|
@ -803,45 +773,6 @@ acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width)
|
|||
return AE_OK;
|
||||
}
|
||||
|
||||
acpi_status
|
||||
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
|
||||
{
|
||||
void __iomem *virt_addr;
|
||||
unsigned int size = width / 8;
|
||||
bool unmap = false;
|
||||
|
||||
rcu_read_lock();
|
||||
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
|
||||
if (!virt_addr) {
|
||||
rcu_read_unlock();
|
||||
virt_addr = acpi_os_ioremap(phys_addr, size);
|
||||
if (!virt_addr)
|
||||
return AE_BAD_ADDRESS;
|
||||
unmap = true;
|
||||
}
|
||||
|
||||
switch (width) {
|
||||
case 8:
|
||||
writeb(value, virt_addr);
|
||||
break;
|
||||
case 16:
|
||||
writew(value, virt_addr);
|
||||
break;
|
||||
case 32:
|
||||
writel(value, virt_addr);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
if (unmap)
|
||||
iounmap(virt_addr);
|
||||
else
|
||||
rcu_read_unlock();
|
||||
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
#ifdef writeq
|
||||
static inline void write64(u64 val, volatile void __iomem *addr)
|
||||
{
|
||||
|
@ -856,7 +787,7 @@ static inline void write64(u64 val, volatile void __iomem *addr)
|
|||
#endif
|
||||
|
||||
acpi_status
|
||||
acpi_os_write_memory64(acpi_physical_address phys_addr, u64 value, u32 width)
|
||||
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
|
||||
{
|
||||
void __iomem *virt_addr;
|
||||
unsigned int size = width / 8;
|
||||
|
@ -1641,3 +1572,24 @@ acpi_status acpi_os_terminate(void)
|
|||
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
|
||||
u32 pm1b_control)
|
||||
{
|
||||
int rc = 0;
|
||||
if (__acpi_os_prepare_sleep)
|
||||
rc = __acpi_os_prepare_sleep(sleep_state,
|
||||
pm1a_control, pm1b_control);
|
||||
if (rc < 0)
|
||||
return AE_ERROR;
|
||||
else if (rc > 0)
|
||||
return AE_CTRL_SKIP;
|
||||
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
|
||||
u32 pm1a_ctrl, u32 pm1b_ctrl))
|
||||
{
|
||||
__acpi_os_prepare_sleep = func;
|
||||
}
|
||||
|
|
|
@ -40,9 +40,11 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <acpi/acpi_bus.h>
|
||||
#include <acpi/acpi_drivers.h>
|
||||
#include "sleep.h"
|
||||
#include "internal.h"
|
||||
|
||||
#define PREFIX "ACPI: "
|
||||
|
||||
|
@ -77,6 +79,20 @@ static struct acpi_driver acpi_power_driver = {
|
|||
},
|
||||
};
|
||||
|
||||
/*
|
||||
* A power managed device
|
||||
* A device may rely on multiple power resources.
|
||||
* */
|
||||
struct acpi_power_managed_device {
|
||||
struct device *dev; /* The physical device */
|
||||
acpi_handle *handle;
|
||||
};
|
||||
|
||||
struct acpi_power_resource_device {
|
||||
struct acpi_power_managed_device *device;
|
||||
struct acpi_power_resource_device *next;
|
||||
};
|
||||
|
||||
struct acpi_power_resource {
|
||||
struct acpi_device * device;
|
||||
acpi_bus_id name;
|
||||
|
@ -84,6 +100,9 @@ struct acpi_power_resource {
|
|||
u32 order;
|
||||
unsigned int ref_count;
|
||||
struct mutex resource_lock;
|
||||
|
||||
/* List of devices relying on this power resource */
|
||||
struct acpi_power_resource_device *devices;
|
||||
};
|
||||
|
||||
static struct list_head acpi_power_resource_list;
|
||||
|
@ -183,8 +202,26 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Resume the device when all power resources in _PR0 are on */
|
||||
static void acpi_power_on_device(struct acpi_power_managed_device *device)
|
||||
{
|
||||
struct acpi_device *acpi_dev;
|
||||
acpi_handle handle = device->handle;
|
||||
int state;
|
||||
|
||||
if (acpi_bus_get_device(handle, &acpi_dev))
|
||||
return;
|
||||
|
||||
if(acpi_power_get_inferred_state(acpi_dev, &state))
|
||||
return;
|
||||
|
||||
if (state == ACPI_STATE_D0 && pm_runtime_suspended(device->dev))
|
||||
pm_request_resume(device->dev);
|
||||
}
|
||||
|
||||
static int __acpi_power_on(struct acpi_power_resource *resource)
|
||||
{
|
||||
struct acpi_power_resource_device *device_list = resource->devices;
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
|
||||
|
@ -197,6 +234,12 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
|
|||
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
|
||||
resource->name));
|
||||
|
||||
while (device_list) {
|
||||
acpi_power_on_device(device_list->device);
|
||||
|
||||
device_list = device_list->next;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -299,6 +342,125 @@ static int acpi_power_on_list(struct acpi_handle_list *list)
|
|||
return result;
|
||||
}
|
||||
|
||||
static void __acpi_power_resource_unregister_device(struct device *dev,
|
||||
acpi_handle res_handle)
|
||||
{
|
||||
struct acpi_power_resource *resource = NULL;
|
||||
struct acpi_power_resource_device *prev, *curr;
|
||||
|
||||
if (acpi_power_get_context(res_handle, &resource))
|
||||
return;
|
||||
|
||||
mutex_lock(&resource->resource_lock);
|
||||
prev = NULL;
|
||||
curr = resource->devices;
|
||||
while (curr) {
|
||||
if (curr->device->dev == dev) {
|
||||
if (!prev)
|
||||
resource->devices = curr->next;
|
||||
else
|
||||
prev->next = curr->next;
|
||||
|
||||
kfree(curr);
|
||||
break;
|
||||
}
|
||||
|
||||
prev = curr;
|
||||
curr = curr->next;
|
||||
}
|
||||
mutex_unlock(&resource->resource_lock);
|
||||
}
|
||||
|
||||
/* Unlink dev from all power resources in _PR0 */
|
||||
void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle)
|
||||
{
|
||||
struct acpi_device *acpi_dev;
|
||||
struct acpi_handle_list *list;
|
||||
int i;
|
||||
|
||||
if (!dev || !handle)
|
||||
return;
|
||||
|
||||
if (acpi_bus_get_device(handle, &acpi_dev))
|
||||
return;
|
||||
|
||||
list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
|
||||
|
||||
for (i = 0; i < list->count; i++)
|
||||
__acpi_power_resource_unregister_device(dev,
|
||||
list->handles[i]);
|
||||
}
|
||||
|
||||
static int __acpi_power_resource_register_device(
|
||||
struct acpi_power_managed_device *powered_device, acpi_handle handle)
|
||||
{
|
||||
struct acpi_power_resource *resource = NULL;
|
||||
struct acpi_power_resource_device *power_resource_device;
|
||||
int result;
|
||||
|
||||
result = acpi_power_get_context(handle, &resource);
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
power_resource_device = kzalloc(
|
||||
sizeof(*power_resource_device), GFP_KERNEL);
|
||||
if (!power_resource_device)
|
||||
return -ENOMEM;
|
||||
|
||||
power_resource_device->device = powered_device;
|
||||
|
||||
mutex_lock(&resource->resource_lock);
|
||||
power_resource_device->next = resource->devices;
|
||||
resource->devices = power_resource_device;
|
||||
mutex_unlock(&resource->resource_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Link dev to all power resources in _PR0 */
|
||||
int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
|
||||
{
|
||||
struct acpi_device *acpi_dev;
|
||||
struct acpi_handle_list *list;
|
||||
struct acpi_power_managed_device *powered_device;
|
||||
int i, ret;
|
||||
|
||||
if (!dev || !handle)
|
||||
return -ENODEV;
|
||||
|
||||
ret = acpi_bus_get_device(handle, &acpi_dev);
|
||||
if (ret)
|
||||
goto no_power_resource;
|
||||
|
||||
if (!acpi_dev->power.flags.power_resources)
|
||||
goto no_power_resource;
|
||||
|
||||
powered_device = kzalloc(sizeof(*powered_device), GFP_KERNEL);
|
||||
if (!powered_device)
|
||||
return -ENOMEM;
|
||||
|
||||
powered_device->dev = dev;
|
||||
powered_device->handle = handle;
|
||||
|
||||
list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
|
||||
|
||||
for (i = 0; i < list->count; i++) {
|
||||
ret = __acpi_power_resource_register_device(powered_device,
|
||||
list->handles[i]);
|
||||
|
||||
if (ret) {
|
||||
acpi_power_resource_unregister_device(dev, handle);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
no_power_resource:
|
||||
printk(KERN_WARNING PREFIX "Invalid Power Resource to register!");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
|
||||
* ACPI 3.0) _PSW (Power State Wake)
|
||||
|
@ -500,14 +662,14 @@ int acpi_power_transition(struct acpi_device *device, int state)
|
|||
{
|
||||
int result;
|
||||
|
||||
if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
|
||||
if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
|
||||
return -EINVAL;
|
||||
|
||||
if (device->power.state == state)
|
||||
return 0;
|
||||
|
||||
if ((device->power.state < ACPI_STATE_D0)
|
||||
|| (device->power.state > ACPI_STATE_D3))
|
||||
|| (device->power.state > ACPI_STATE_D3_COLD))
|
||||
return -ENODEV;
|
||||
|
||||
/* TBD: Resources must be ordered. */
|
||||
|
|
|
@ -67,6 +67,7 @@
|
|||
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
|
||||
#define ACPI_PROCESSOR_NOTIFY_POWER 0x81
|
||||
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
|
||||
#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
|
||||
|
||||
#define ACPI_PROCESSOR_LIMIT_USER 0
|
||||
#define ACPI_PROCESSOR_LIMIT_THERMAL 1
|
||||
|
@ -87,7 +88,7 @@ static int acpi_processor_start(struct acpi_processor *pr);
|
|||
|
||||
static const struct acpi_device_id processor_device_ids[] = {
|
||||
{ACPI_PROCESSOR_OBJECT_HID, 0},
|
||||
{"ACPI0007", 0},
|
||||
{ACPI_PROCESSOR_DEVICE_HID, 0},
|
||||
{"", 0},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
|
||||
|
@ -535,8 +536,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
|
|||
return -ENOMEM;
|
||||
|
||||
if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
|
||||
kfree(pr);
|
||||
return -ENOMEM;
|
||||
result = -ENOMEM;
|
||||
goto err_free_pr;
|
||||
}
|
||||
|
||||
pr->handle = device->handle;
|
||||
|
@ -576,7 +577,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
|
|||
dev = get_cpu_device(pr->id);
|
||||
if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
|
||||
result = -EFAULT;
|
||||
goto err_free_cpumask;
|
||||
goto err_clear_processor;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -594,9 +595,15 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
|
|||
|
||||
err_remove_sysfs:
|
||||
sysfs_remove_link(&device->dev.kobj, "sysdev");
|
||||
err_clear_processor:
|
||||
/*
|
||||
* processor_device_array is not cleared to allow checks for buggy BIOS
|
||||
*/
|
||||
per_cpu(processors, pr->id) = NULL;
|
||||
err_free_cpumask:
|
||||
free_cpumask_var(pr->throttling.shared_cpu_map);
|
||||
|
||||
err_free_pr:
|
||||
kfree(pr);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -741,20 +748,46 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
|
|||
return;
|
||||
}
|
||||
|
||||
static acpi_status is_processor_device(acpi_handle handle)
|
||||
{
|
||||
struct acpi_device_info *info;
|
||||
char *hid;
|
||||
acpi_status status;
|
||||
|
||||
status = acpi_get_object_info(handle, &info);
|
||||
if (ACPI_FAILURE(status))
|
||||
return status;
|
||||
|
||||
if (info->type == ACPI_TYPE_PROCESSOR) {
|
||||
kfree(info);
|
||||
return AE_OK; /* found a processor object */
|
||||
}
|
||||
|
||||
if (!(info->valid & ACPI_VALID_HID)) {
|
||||
kfree(info);
|
||||
return AE_ERROR;
|
||||
}
|
||||
|
||||
hid = info->hardware_id.string;
|
||||
if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
|
||||
kfree(info);
|
||||
return AE_ERROR;
|
||||
}
|
||||
|
||||
kfree(info);
|
||||
return AE_OK; /* found a processor device object */
|
||||
}
|
||||
|
||||
static acpi_status
|
||||
processor_walk_namespace_cb(acpi_handle handle,
|
||||
u32 lvl, void *context, void **rv)
|
||||
{
|
||||
acpi_status status;
|
||||
int *action = context;
|
||||
acpi_object_type type = 0;
|
||||
|
||||
status = acpi_get_type(handle, &type);
|
||||
status = is_processor_device(handle);
|
||||
if (ACPI_FAILURE(status))
|
||||
return (AE_OK);
|
||||
|
||||
if (type != ACPI_TYPE_PROCESSOR)
|
||||
return (AE_OK);
|
||||
return AE_OK; /* not a processor; continue to walk */
|
||||
|
||||
switch (*action) {
|
||||
case INSTALL_NOTIFY_HANDLER:
|
||||
|
@ -772,7 +805,8 @@ processor_walk_namespace_cb(acpi_handle handle,
|
|||
break;
|
||||
}
|
||||
|
||||
return (AE_OK);
|
||||
/* found a processor; skip walking underneath */
|
||||
return AE_CTRL_DEPTH;
|
||||
}
|
||||
|
||||
static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
|
||||
|
@ -830,7 +864,7 @@ void acpi_processor_install_hotplug_notify(void)
|
|||
{
|
||||
#ifdef CONFIG_ACPI_HOTPLUG_CPU
|
||||
int action = INSTALL_NOTIFY_HANDLER;
|
||||
acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
|
||||
acpi_walk_namespace(ACPI_TYPE_ANY,
|
||||
ACPI_ROOT_OBJECT,
|
||||
ACPI_UINT32_MAX,
|
||||
processor_walk_namespace_cb, NULL, &action, NULL);
|
||||
|
@ -843,7 +877,7 @@ void acpi_processor_uninstall_hotplug_notify(void)
|
|||
{
|
||||
#ifdef CONFIG_ACPI_HOTPLUG_CPU
|
||||
int action = UNINSTALL_NOTIFY_HANDLER;
|
||||
acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
|
||||
acpi_walk_namespace(ACPI_TYPE_ANY,
|
||||
ACPI_ROOT_OBJECT,
|
||||
ACPI_UINT32_MAX,
|
||||
processor_walk_namespace_cb, NULL, &action, NULL);
|
||||
|
|
|
@@ -770,6 +770,35 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
return index;
}


/**
* acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
* @dev: the target CPU
* @index: the index of suggested state
*/
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

ACPI_FLUSH_CPU_CACHE();

while (1) {

if (cx->entry_method == ACPI_CSTATE_HALT)
halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
inb(cx->address);
/* See comment in acpi_idle_do_entry() */
inl(acpi_gbl_FADT.xpm_timer_block.address);
} else
return -ENODEV;
}

/* Never reached */
return 0;
}

/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU

@@ -1077,12 +1106,14 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
state->flags |= CPUIDLE_FLAG_TIME_VALID;

state->enter = acpi_idle_enter_c1;
state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
break;

case ACPI_STATE_C2:
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_simple;
state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
break;


@@ -1159,8 +1190,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
* to make the code that updates C-States be called once.
*/

if (smp_processor_id() == 0 &&
cpuidle_get_driver() == &acpi_idle_driver) {
if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

cpuidle_pause_and_lock();
/* Protect against cpu-hotplug */
@@ -57,6 +57,27 @@ ACPI_MODULE_NAME("processor_thermal");
static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;

#define reduction_pctg(cpu) \
per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))

/*
* Emulate "per package data" using per cpu data (which should really be
* provided elsewhere)
*
* Note we can lose a CPU on cpu hotunplug, in this case we forget the state
* temporarily. Fortunately that's not a big issue here (I hope)
*/
static int phys_package_first_cpu(int cpu)
{
int i;
int id = topology_physical_package_id(cpu);

for_each_online_cpu(i)
if (topology_physical_package_id(i) == id)
return i;
return 0;
}

static int cpu_has_cpufreq(unsigned int cpu)
{
struct cpufreq_policy policy;

@@ -76,7 +97,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,

max_freq = (
policy->cpuinfo.max_freq *
(100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
(100 - reduction_pctg(policy->cpu) * 20)
) / 100;

cpufreq_verify_within_limits(policy, 0, max_freq);

@@ -102,16 +123,28 @@ static int cpufreq_get_cur_state(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return 0;

return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
return reduction_pctg(cpu);
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
int i;

if (!cpu_has_cpufreq(cpu))
return 0;

per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
cpufreq_update_policy(cpu);
reduction_pctg(cpu) = state;

/*
* Update all the CPUs in the same package because they all
* contribute to the temperature and often share the same
* frequency.
*/
for_each_online_cpu(i) {
if (topology_physical_package_id(i) ==
topology_physical_package_id(cpu))
cpufreq_update_policy(i);
}
return 0;
}

@@ -119,10 +152,6 @@ void acpi_thermal_cpufreq_init(void)
{
int i;

for (i = 0; i < nr_cpu_ids; i++)
if (cpu_present(i))
per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;

i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
if (!i)
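For readers following the arithmetic, each cooling state in the hunk above shaves another 20% off cpuinfo.max_freq. A small stand-alone sketch of that formula; the 2 GHz (2000000 kHz) base frequency is an assumed example value, not something taken from the patch:

#include <stdio.h>

/* Mirrors the max_freq arithmetic in acpi_thermal_cpufreq_notifier():
 * each cooling state removes another 20% of cpuinfo.max_freq.
 */
int main(void)
{
	unsigned int cpuinfo_max_freq = 2000000;	/* kHz, assumed example */
	unsigned int state;

	for (state = 0; state <= 4; state++) {
		unsigned int max_freq =
			(cpuinfo_max_freq * (100 - state * 20)) / 100;
		printf("state %u -> max_freq %u kHz\n", state, max_freq);
	}
	return 0;
}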
@@ -769,7 +769,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
u64 *value)
{
u32 bit_width, bit_offset;
u64 ptc_value;
u32 ptc_value;
u64 ptc_mask;
struct acpi_processor_throttling *throttling;
int ret = -1;

@@ -777,12 +777,11 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
throttling = &pr->throttling;
switch (throttling->status_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
ptc_value = 0;
bit_width = throttling->status_register.bit_width;
bit_offset = throttling->status_register.bit_offset;

acpi_os_read_port((acpi_io_address) throttling->status_register.
address, (u32 *) &ptc_value,
address, &ptc_value,
(u32) (bit_width + bit_offset));
ptc_mask = (1 << bit_width) - 1;
*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
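The ptc_value change above is the endian fix called out in the merge summary: the port read stores 32 bits, so filling a 64-bit variable through a cast pointer only gives the expected result on little-endian machines. A stand-alone illustration of that hazard (not kernel code; the memcpy merely models where the 32-bit store lands inside a 64-bit object):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Writing 32 bits into the first four bytes of a u64 only yields the
 * expected value on little-endian machines.  Declaring the destination
 * as a u32, as the patch does, avoids relying on byte order.
 */
int main(void)
{
	uint64_t wide = 0;
	uint32_t raw = 0x12345678;	/* pretend this came from the I/O port */

	memcpy(&wide, &raw, sizeof(raw));	/* what the old cast amounted to */

	printf("u32 read back through a u64: 0x%llx\n",
	       (unsigned long long)wide);	/* 0x12345678 only on LE */
	return 0;
}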
@@ -23,8 +23,7 @@ void acpi_reboot(void)
/* Is the reset register supported? The spec says we should be
* checking the bit width and bit offset, but Windows ignores
* these fields */
if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER))
return;
/* Ignore also acpi_gbl_FADT.flags.ACPI_FADT_RESET_REGISTER */

reset_value = acpi_gbl_FADT.reset_value;

@@ -880,18 +880,22 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
int j;

device->power.flags.power_resources = 1;
ps->flags.valid = 1;
for (j = 0; j < ps->resources.count; j++)
acpi_bus_add_power_resource(ps->resources.handles[j]);
}

/* The exist of _PR3 indicates D3Cold support */
if (i == ACPI_STATE_D3) {
status = acpi_get_handle(device->handle, object_name, &handle);
if (ACPI_SUCCESS(status))
device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
}

/* Evaluate "_PSx" to see if we can do explicit sets */
object_name[2] = 'S';
status = acpi_get_handle(device->handle, object_name, &handle);
if (ACPI_SUCCESS(status)) {
if (ACPI_SUCCESS(status))
ps->flags.explicit_set = 1;
ps->flags.valid = 1;
}

/* State is valid if we have some power control */
if (ps->resources.count || ps->flags.explicit_set)
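The _PR3/_PSx handling above reuses one four-character method name buffer and patches a single byte to switch from the _PRx to the _PSx lookup. A stand-alone sketch of that naming trick; the buffer initialisation shown here is assumed, since the patch context does not include it:

#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i <= 3; i++) {
		char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };

		printf("power resources: %s\n", object_name);
		object_name[2] = 'S';		/* same trick as in the hunk */
		printf("explicit set:    %s\n", object_name);
	}
	return 0;
}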
@ -17,6 +17,8 @@
|
|||
#include <linux/suspend.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
|
||||
|
@ -26,6 +28,24 @@
|
|||
#include "internal.h"
|
||||
#include "sleep.h"
|
||||
|
||||
static unsigned int gts, bfs;
|
||||
module_param(gts, uint, 0644);
|
||||
module_param(bfs, uint, 0644);
|
||||
MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
|
||||
MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
|
||||
|
||||
static u8 wake_sleep_flags(void)
|
||||
{
|
||||
u8 flags = ACPI_NO_OPTIONAL_METHODS;
|
||||
|
||||
if (gts)
|
||||
flags |= ACPI_EXECUTE_GTS;
|
||||
if (bfs)
|
||||
flags |= ACPI_EXECUTE_BFS;
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
static u8 sleep_states[ACPI_S_STATE_COUNT];
|
||||
|
||||
static void acpi_sleep_tts_switch(u32 acpi_state)
|
||||
|
@ -243,6 +263,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
|
|||
{
|
||||
acpi_status status = AE_OK;
|
||||
u32 acpi_state = acpi_target_sleep_state;
|
||||
u8 flags = wake_sleep_flags();
|
||||
int error;
|
||||
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
@ -250,7 +271,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
|
|||
switch (acpi_state) {
|
||||
case ACPI_STATE_S1:
|
||||
barrier();
|
||||
status = acpi_enter_sleep_state(acpi_state);
|
||||
status = acpi_enter_sleep_state(acpi_state, flags);
|
||||
break;
|
||||
|
||||
case ACPI_STATE_S3:
|
||||
|
@ -265,7 +286,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
|
|||
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
|
||||
|
||||
/* Reprogram control registers and execute _BFS */
|
||||
acpi_leave_sleep_state_prep(acpi_state);
|
||||
acpi_leave_sleep_state_prep(acpi_state, flags);
|
||||
|
||||
/* ACPI 3.0 specs (P62) says that it's the responsibility
|
||||
* of the OSPM to clear the status bit [ implying that the
|
||||
|
@ -529,27 +550,30 @@ static int acpi_hibernation_begin(void)
|
|||
|
||||
static int acpi_hibernation_enter(void)
|
||||
{
|
||||
u8 flags = wake_sleep_flags();
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
||||
/* This shouldn't return. If it returns, we have a problem */
|
||||
status = acpi_enter_sleep_state(ACPI_STATE_S4);
|
||||
status = acpi_enter_sleep_state(ACPI_STATE_S4, flags);
|
||||
/* Reprogram control registers and execute _BFS */
|
||||
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
|
||||
acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
|
||||
|
||||
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
|
||||
}
|
||||
|
||||
static void acpi_hibernation_leave(void)
|
||||
{
|
||||
u8 flags = wake_sleep_flags();
|
||||
|
||||
/*
|
||||
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
|
||||
* enable it here.
|
||||
*/
|
||||
acpi_enable();
|
||||
/* Reprogram control registers and execute _BFS */
|
||||
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
|
||||
acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
|
||||
/* Check the hardware signature */
|
||||
if (facs && s4_hardware_signature != facs->hardware_signature) {
|
||||
printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
|
||||
|
@ -729,6 +753,40 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
|
|||
#endif /* CONFIG_PM */
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
/**
|
||||
* acpi_pm_device_run_wake - Enable/disable wake-up for given device.
|
||||
* @phys_dev: Device to enable/disable the platform to wake-up the system for.
|
||||
* @enable: Whether enable or disable the wake-up functionality.
|
||||
*
|
||||
* Find the ACPI device object corresponding to @pci_dev and try to
|
||||
* enable/disable the GPE associated with it.
|
||||
*/
|
||||
int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
|
||||
{
|
||||
struct acpi_device *dev;
|
||||
acpi_handle handle;
|
||||
|
||||
if (!device_run_wake(phys_dev))
|
||||
return -EINVAL;
|
||||
|
||||
handle = DEVICE_ACPI_HANDLE(phys_dev);
|
||||
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
|
||||
dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
|
||||
__func__);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (enable) {
|
||||
acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
|
||||
acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
|
||||
} else {
|
||||
acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
|
||||
acpi_disable_wakeup_device_power(dev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_pm_device_sleep_wake - enable or disable the system wake-up
|
||||
* capability of given device
|
||||
|
@ -770,10 +828,12 @@ static void acpi_power_off_prepare(void)
|
|||
|
||||
static void acpi_power_off(void)
|
||||
{
|
||||
u8 flags = wake_sleep_flags();
|
||||
|
||||
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
|
||||
printk(KERN_DEBUG "%s called\n", __func__);
|
||||
local_irq_disable();
|
||||
acpi_enter_sleep_state(ACPI_STATE_S5);
|
||||
acpi_enter_sleep_state(ACPI_STATE_S5, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -788,13 +848,13 @@ static void __init acpi_gts_bfs_check(void)
|
|||
{
|
||||
acpi_handle dummy;
|
||||
|
||||
if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_NAME__GTS, &dummy)))
|
||||
if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
|
||||
{
|
||||
printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
|
||||
printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
|
||||
"please notify linux-acpi@vger.kernel.org\n");
|
||||
}
|
||||
if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_NAME__BFS, &dummy)))
|
||||
if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
|
||||
{
|
||||
printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
|
||||
printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
|
||||
|
|
|
@@ -941,13 +941,13 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
if (!tz)
return -EINVAL;

/* Get temperature [_TMP] (required) */
result = acpi_thermal_get_temperature(tz);
/* Get trip points [_CRT, _PSV, etc.] (required) */
result = acpi_thermal_get_trip_points(tz);
if (result)
return result;

/* Get trip points [_CRT, _PSV, etc.] (required) */
result = acpi_thermal_get_trip_points(tz);
/* Get temperature [_TMP] (required) */
result = acpi_thermal_get_temperature(tz);
if (result)
return result;

@ -548,27 +548,27 @@ acpi_video_device_EDID(struct acpi_video_device *device,
|
|||
* 1. The system BIOS should NOT automatically control the brightness
|
||||
* level of the LCD when the power changes from AC to DC.
|
||||
* Return Value:
|
||||
* -1 wrong arg.
|
||||
* -EINVAL wrong arg.
|
||||
*/
|
||||
|
||||
static int
|
||||
acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
|
||||
{
|
||||
u64 status = 0;
|
||||
acpi_status status;
|
||||
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
|
||||
struct acpi_object_list args = { 1, &arg0 };
|
||||
|
||||
|
||||
if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1) {
|
||||
status = -1;
|
||||
goto Failed;
|
||||
}
|
||||
if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
|
||||
return -EINVAL;
|
||||
arg0.integer.value = (lcd_flag << 2) | bios_flag;
|
||||
video->dos_setting = arg0.integer.value;
|
||||
acpi_evaluate_object(video->device->handle, "_DOS", &args, NULL);
|
||||
status = acpi_evaluate_object(video->device->handle, "_DOS",
|
||||
&args, NULL);
|
||||
if (ACPI_FAILURE(status))
|
||||
return -EIO;
|
||||
|
||||
Failed:
|
||||
return status;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1343,15 +1343,17 @@ static int
|
|||
acpi_video_bus_get_devices(struct acpi_video_bus *video,
|
||||
struct acpi_device *device)
|
||||
{
|
||||
int status = 0;
|
||||
int status;
|
||||
struct acpi_device *dev;
|
||||
|
||||
acpi_video_device_enumerate(video);
|
||||
status = acpi_video_device_enumerate(video);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
list_for_each_entry(dev, &device->children, node) {
|
||||
|
||||
status = acpi_video_bus_get_one_device(dev, video);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
if (status) {
|
||||
printk(KERN_WARNING PREFIX
|
||||
"Can't attach device\n");
|
||||
continue;
|
||||
|
@ -1653,15 +1655,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
|
|||
mutex_init(&video->device_list_lock);
|
||||
INIT_LIST_HEAD(&video->video_device_list);
|
||||
|
||||
acpi_video_bus_get_devices(video, device);
|
||||
acpi_video_bus_start_devices(video);
|
||||
error = acpi_video_bus_get_devices(video, device);
|
||||
if (error)
|
||||
goto err_free_video;
|
||||
|
||||
video->input = input = input_allocate_device();
|
||||
if (!input) {
|
||||
error = -ENOMEM;
|
||||
goto err_stop_video;
|
||||
goto err_put_video;
|
||||
}
|
||||
|
||||
error = acpi_video_bus_start_devices(video);
|
||||
if (error)
|
||||
goto err_free_input_dev;
|
||||
|
||||
snprintf(video->phys, sizeof(video->phys),
|
||||
"%s/video/input0", acpi_device_hid(video->device));
|
||||
|
||||
|
@ -1682,7 +1689,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
|
|||
|
||||
error = input_register_device(input);
|
||||
if (error)
|
||||
goto err_free_input_dev;
|
||||
goto err_stop_video;
|
||||
|
||||
printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
|
||||
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
|
||||
|
@ -1692,14 +1699,19 @@ static int acpi_video_bus_add(struct acpi_device *device)
|
|||
|
||||
video->pm_nb.notifier_call = acpi_video_resume;
|
||||
video->pm_nb.priority = 0;
|
||||
register_pm_notifier(&video->pm_nb);
|
||||
error = register_pm_notifier(&video->pm_nb);
|
||||
if (error)
|
||||
goto err_unregister_input_dev;
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_input_dev:
|
||||
input_free_device(input);
|
||||
err_unregister_input_dev:
|
||||
input_unregister_device(input);
|
||||
err_stop_video:
|
||||
acpi_video_bus_stop_devices(video);
|
||||
err_free_input_dev:
|
||||
input_free_device(input);
|
||||
err_put_video:
|
||||
acpi_video_bus_put_devices(video);
|
||||
kfree(video->attached_array);
|
||||
err_free_video:
|
||||
|
|
|
@ -53,6 +53,52 @@ static void cpuidle_kick_cpus(void) {}
|
|||
|
||||
static int __cpuidle_register_device(struct cpuidle_device *dev);
|
||||
|
||||
static inline int cpuidle_enter(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv, int index)
|
||||
{
|
||||
struct cpuidle_state *target_state = &drv->states[index];
|
||||
return target_state->enter(dev, drv, index);
|
||||
}
|
||||
|
||||
static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv, int index)
|
||||
{
|
||||
return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
|
||||
}
|
||||
|
||||
typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv, int index);
|
||||
|
||||
static cpuidle_enter_t cpuidle_enter_ops;
|
||||
|
||||
/**
|
||||
* cpuidle_play_dead - cpu off-lining
|
||||
*
|
||||
* Only returns in case of an error
|
||||
*/
|
||||
int cpuidle_play_dead(void)
|
||||
{
|
||||
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
|
||||
struct cpuidle_driver *drv = cpuidle_get_driver();
|
||||
int i, dead_state = -1;
|
||||
int power_usage = -1;
|
||||
|
||||
/* Find lowest-power state that supports long-term idle */
|
||||
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
|
||||
struct cpuidle_state *s = &drv->states[i];
|
||||
|
||||
if (s->power_usage < power_usage && s->enter_dead) {
|
||||
power_usage = s->power_usage;
|
||||
dead_state = i;
|
||||
}
|
||||
}
|
||||
|
||||
if (dead_state != -1)
|
||||
return drv->states[dead_state].enter_dead(dev, dead_state);
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_idle_call - the main idle loop
|
||||
*
|
||||
|
@ -63,7 +109,6 @@ int cpuidle_idle_call(void)
|
|||
{
|
||||
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
|
||||
struct cpuidle_driver *drv = cpuidle_get_driver();
|
||||
struct cpuidle_state *target_state;
|
||||
int next_state, entered_state;
|
||||
|
||||
if (off)
|
||||
|
@ -92,12 +137,10 @@ int cpuidle_idle_call(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
target_state = &drv->states[next_state];
|
||||
|
||||
trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
|
||||
trace_cpu_idle_rcuidle(next_state, dev->cpu);
|
||||
|
||||
entered_state = target_state->enter(dev, drv, next_state);
|
||||
entered_state = cpuidle_enter_ops(dev, drv, next_state);
|
||||
|
||||
trace_power_end_rcuidle(dev->cpu);
|
||||
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
|
||||
|
@ -110,6 +153,8 @@ int cpuidle_idle_call(void)
|
|||
dev->states_usage[entered_state].time +=
|
||||
(unsigned long long)dev->last_residency;
|
||||
dev->states_usage[entered_state].usage++;
|
||||
} else {
|
||||
dev->last_residency = 0;
|
||||
}
|
||||
|
||||
/* give the governor an opportunity to reflect on the outcome */
|
||||
|
@ -164,6 +209,37 @@ void cpuidle_resume_and_unlock(void)
|
|||
|
||||
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
|
||||
|
||||
/**
|
||||
* cpuidle_wrap_enter - performs timekeeping and irqen around enter function
|
||||
* @dev: pointer to a valid cpuidle_device object
|
||||
* @drv: pointer to a valid cpuidle_driver object
|
||||
* @index: index of the target cpuidle state.
|
||||
*/
|
||||
int cpuidle_wrap_enter(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv, int index,
|
||||
int (*enter)(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv, int index))
|
||||
{
|
||||
ktime_t time_start, time_end;
|
||||
s64 diff;
|
||||
|
||||
time_start = ktime_get();
|
||||
|
||||
index = enter(dev, drv, index);
|
||||
|
||||
time_end = ktime_get();
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
diff = ktime_to_us(ktime_sub(time_end, time_start));
|
||||
if (diff > INT_MAX)
|
||||
diff = INT_MAX;
|
||||
|
||||
dev->last_residency = (int) diff;
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
|
||||
static int poll_idle(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv, int index)
|
||||
|
@ -197,6 +273,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
|
|||
state->power_usage = -1;
|
||||
state->flags = 0;
|
||||
state->enter = poll_idle;
|
||||
state->disable = 0;
|
||||
}
|
||||
#else
|
||||
static void poll_idle_init(struct cpuidle_driver *drv) {}
|
||||
|
@ -212,13 +289,14 @@ static void poll_idle_init(struct cpuidle_driver *drv) {}
|
|||
int cpuidle_enable_device(struct cpuidle_device *dev)
|
||||
{
|
||||
int ret, i;
|
||||
struct cpuidle_driver *drv = cpuidle_get_driver();
|
||||
|
||||
if (dev->enabled)
|
||||
return 0;
|
||||
if (!cpuidle_get_driver() || !cpuidle_curr_governor)
|
||||
if (!drv || !cpuidle_curr_governor)
|
||||
return -EIO;
|
||||
if (!dev->state_count)
|
||||
return -EINVAL;
|
||||
dev->state_count = drv->state_count;
|
||||
|
||||
if (dev->registered == 0) {
|
||||
ret = __cpuidle_register_device(dev);
|
||||
|
@ -226,13 +304,16 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
poll_idle_init(cpuidle_get_driver());
|
||||
cpuidle_enter_ops = drv->en_core_tk_irqen ?
|
||||
cpuidle_enter_tk : cpuidle_enter;
|
||||
|
||||
poll_idle_init(drv);
|
||||
|
||||
if ((ret = cpuidle_add_state_sysfs(dev)))
|
||||
return ret;
|
||||
|
||||
if (cpuidle_curr_governor->enable &&
|
||||
(ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
|
||||
(ret = cpuidle_curr_governor->enable(drv, dev)))
|
||||
goto fail_sysfs;
|
||||
|
||||
for (i = 0; i < dev->state_count; i++) {
|
||||
|
|
|
@@ -47,7 +47,7 @@ static void __cpuidle_register_driver(struct cpuidle_driver *drv)
*/
int cpuidle_register_driver(struct cpuidle_driver *drv)
{
if (!drv)
if (!drv || !drv->state_count)
return -EINVAL;

if (cpuidle_disabled())
@@ -236,7 +236,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
unsigned int power_usage = -1;
int power_usage = -1;
int i;
int multiplier;
struct timespec t;

@@ -280,7 +280,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* We want to default to C1 (hlt), not to busy polling
* unless the timer is happening really really soon.
*/
if (data->expected_us > 5)
if (data->expected_us > 5 &&
drv->states[CPUIDLE_DRIVER_STATE_START].disable == 0)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

/*

@@ -290,6 +291,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];

if (s->disable)
continue;
if (s->target_residency > data->predicted_us)
continue;
if (s->exit_latency > latency_req)
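The new checks above make the menu governor honour per-state disabling as well as the residency and latency limits while walking the state table. A simplified, stand-alone version of that selection loop, using plain structs instead of struct cpuidle_state and only the checks visible in this hunk:

#include <stdio.h>

struct state {
	int disable;
	unsigned int exit_latency;	/* worst-case wakeup latency, us */
	unsigned int target_residency;	/* minimum useful sleep time, us */
};

/* Pick the deepest usable state: skip disabled states and states whose
 * residency or latency requirements cannot be met right now.
 */
static int pick_state(const struct state *s, int count,
		      unsigned int predicted_us, unsigned int latency_req)
{
	int i, best = -1;

	for (i = 0; i < count; i++) {
		if (s[i].disable)
			continue;
		if (s[i].target_residency > predicted_us)
			continue;
		if (s[i].exit_latency > latency_req)
			continue;
		best = i;	/* states are ordered shallow -> deep */
	}
	return best;
}

int main(void)
{
	struct state states[] = {
		{ 0,   1,   1 },	/* C1 */
		{ 0,  20,  80 },	/* C2 */
		{ 1, 200, 800 },	/* C3, disabled via sysfs */
	};

	printf("selected state: %d\n",
	       pick_state(states, 3, 1000, 300));	/* expect 1, not 2 */
	return 0;
}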
@ -11,6 +11,7 @@
|
|||
#include <linux/sysfs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/capability.h>
|
||||
|
||||
#include "cpuidle.h"
|
||||
|
||||
|
@ -222,6 +223,9 @@ struct cpuidle_state_attr {
|
|||
#define define_one_state_ro(_name, show) \
|
||||
static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
|
||||
|
||||
#define define_one_state_rw(_name, show, store) \
|
||||
static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
|
||||
|
||||
#define define_show_state_function(_name) \
|
||||
static ssize_t show_state_##_name(struct cpuidle_state *state, \
|
||||
struct cpuidle_state_usage *state_usage, char *buf) \
|
||||
|
@ -229,6 +233,24 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
|
|||
return sprintf(buf, "%u\n", state->_name);\
|
||||
}
|
||||
|
||||
#define define_store_state_function(_name) \
|
||||
static ssize_t store_state_##_name(struct cpuidle_state *state, \
|
||||
const char *buf, size_t size) \
|
||||
{ \
|
||||
long value; \
|
||||
int err; \
|
||||
if (!capable(CAP_SYS_ADMIN)) \
|
||||
return -EPERM; \
|
||||
err = kstrtol(buf, 0, &value); \
|
||||
if (err) \
|
||||
return err; \
|
||||
if (value) \
|
||||
state->disable = 1; \
|
||||
else \
|
||||
state->disable = 0; \
|
||||
return size; \
|
||||
}
|
||||
|
||||
#define define_show_state_ull_function(_name) \
|
||||
static ssize_t show_state_##_name(struct cpuidle_state *state, \
|
||||
struct cpuidle_state_usage *state_usage, char *buf) \
|
||||
|
@ -251,6 +273,8 @@ define_show_state_ull_function(usage)
|
|||
define_show_state_ull_function(time)
|
||||
define_show_state_str_function(name)
|
||||
define_show_state_str_function(desc)
|
||||
define_show_state_function(disable)
|
||||
define_store_state_function(disable)
|
||||
|
||||
define_one_state_ro(name, show_state_name);
|
||||
define_one_state_ro(desc, show_state_desc);
|
||||
|
@ -258,6 +282,7 @@ define_one_state_ro(latency, show_state_exit_latency);
|
|||
define_one_state_ro(power, show_state_power_usage);
|
||||
define_one_state_ro(usage, show_state_usage);
|
||||
define_one_state_ro(time, show_state_time);
|
||||
define_one_state_rw(disable, show_state_disable, store_state_disable);
|
||||
|
||||
static struct attribute *cpuidle_state_default_attrs[] = {
|
||||
&attr_name.attr,
|
||||
|
@ -266,6 +291,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
|
|||
&attr_power.attr,
|
||||
&attr_usage.attr,
|
||||
&attr_time.attr,
|
||||
&attr_disable.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@ -287,8 +313,22 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t cpuidle_state_store(struct kobject *kobj,
|
||||
struct attribute *attr, const char *buf, size_t size)
|
||||
{
|
||||
int ret = -EIO;
|
||||
struct cpuidle_state *state = kobj_to_state(kobj);
|
||||
struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
|
||||
|
||||
if (cattr->store)
|
||||
ret = cattr->store(state, buf, size);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct sysfs_ops cpuidle_state_sysfs_ops = {
|
||||
.show = cpuidle_state_show,
|
||||
.store = cpuidle_state_store,
|
||||
};
|
||||
|
||||
static void cpuidle_state_sysfs_release(struct kobject *kobj)
|
||||
|
|
|
@ -277,40 +277,6 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_dev_run_wake - Enable/disable wake-up for given device.
|
||||
* @phys_dev: Device to enable/disable the platform to wake-up the system for.
|
||||
* @enable: Whether enable or disable the wake-up functionality.
|
||||
*
|
||||
* Find the ACPI device object corresponding to @pci_dev and try to
|
||||
* enable/disable the GPE associated with it.
|
||||
*/
|
||||
static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
|
||||
{
|
||||
struct acpi_device *dev;
|
||||
acpi_handle handle;
|
||||
|
||||
if (!device_run_wake(phys_dev))
|
||||
return -EINVAL;
|
||||
|
||||
handle = DEVICE_ACPI_HANDLE(phys_dev);
|
||||
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
|
||||
dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
|
||||
__func__);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (enable) {
|
||||
acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
|
||||
acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
|
||||
} else {
|
||||
acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
|
||||
acpi_disable_wakeup_device_power(dev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
|
||||
{
|
||||
while (bus->parent) {
|
||||
|
@ -318,14 +284,14 @@ static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
|
|||
|
||||
if (bridge->pme_interrupt)
|
||||
return;
|
||||
if (!acpi_dev_run_wake(&bridge->dev, enable))
|
||||
if (!acpi_pm_device_run_wake(&bridge->dev, enable))
|
||||
return;
|
||||
bus = bus->parent;
|
||||
}
|
||||
|
||||
/* We have reached the root bus. */
|
||||
if (bus->bridge)
|
||||
acpi_dev_run_wake(bus->bridge, enable);
|
||||
acpi_pm_device_run_wake(bus->bridge, enable);
|
||||
}
|
||||
|
||||
static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
|
||||
|
@ -333,7 +299,7 @@ static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
|
|||
if (dev->pme_interrupt)
|
||||
return 0;
|
||||
|
||||
if (!acpi_dev_run_wake(&dev->dev, enable))
|
||||
if (!acpi_pm_device_run_wake(&dev->dev, enable))
|
||||
return 0;
|
||||
|
||||
acpi_pci_propagate_run_wake(dev->bus, enable);
|
||||
|
|
|
@@ -609,25 +609,16 @@ static bool mcp_exceeded(struct ips_driver *ips)
bool ret = false;
u32 temp_limit;
u32 avg_power;
const char *msg = "MCP limit exceeded: ";

spin_lock_irqsave(&ips->turbo_status_lock, flags);

temp_limit = ips->mcp_temp_limit * 100;
if (ips->mcp_avg_temp > temp_limit) {
dev_info(&ips->dev->dev,
"%sAvg temp %u, limit %u\n", msg, ips->mcp_avg_temp,
temp_limit);
if (ips->mcp_avg_temp > temp_limit)
ret = true;
}

avg_power = ips->cpu_avg_power + ips->mch_avg_power;
if (avg_power > ips->mcp_power_limit) {
dev_info(&ips->dev->dev,
"%sAvg power %u, limit %u\n", msg, avg_power,
ips->mcp_power_limit);
if (avg_power > ips->mcp_power_limit)
ret = true;
}

spin_unlock_irqrestore(&ips->turbo_status_lock, flags);

@@ -321,9 +321,14 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
{
struct acpi_device *acpi = to_acpi_device(dev);
struct pnp_dev *pnp = _pnp;
struct device *physical_device;

physical_device = acpi_get_physical_device(acpi->handle);
if (physical_device)
put_device(physical_device);

/* true means it matched */
return !acpi_get_physical_device(acpi->handle)
return !physical_device
&& compare_pnp_id(pnp->id, acpi_device_hid(acpi));
}

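The acpi_pnp_match() fix above remembers the device pointer returned by the lookup so the reference can be dropped once the yes/no answer is known, instead of leaking one reference per call. A stand-alone sketch of that lookup-then-put discipline with a toy reference counter; the toy_* names are invented for illustration and are not the kernel API:

#include <stdio.h>
#include <stdbool.h>

struct toy_device {
	int refcount;
};

static struct toy_device global_dev = { .refcount = 1 };

/* Toy stand-ins for a lookup that takes a reference and its matching put. */
static struct toy_device *toy_lookup(bool exists)
{
	if (!exists)
		return NULL;
	global_dev.refcount++;		/* lookup takes a reference */
	return &global_dev;
}

static void toy_put(struct toy_device *dev)
{
	dev->refcount--;
}

static bool matches(bool has_physical_device)
{
	struct toy_device *dev = toy_lookup(has_physical_device);

	if (dev)
		toy_put(dev);		/* drop the reference right away */

	/* the decision only needs the yes/no answer, not the object */
	return !dev;
}

int main(void)
{
	printf("match (no physical device): %d\n", matches(false));
	printf("match (physical device):    %d\n", matches(true));
	printf("leaked references:          %d\n", global_dev.refcount - 1);
	return 0;
}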
@@ -18,3 +18,11 @@ config THERMAL_HWMON
depends on THERMAL
depends on HWMON=y || HWMON=THERMAL
default y

config SPEAR_THERMAL
bool "SPEAr thermal sensor driver"
depends on THERMAL
depends on PLAT_SPEAR
help
Enable this to plug the SPEAr thermal sensor driver into the Linux
thermal framework

@@ -3,3 +3,4 @@
#

obj-$(CONFIG_THERMAL) += thermal_sys.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
@ -0,0 +1,206 @@
|
|||
/*
|
||||
* SPEAr thermal driver.
|
||||
*
|
||||
* Copyright (C) 2011-2012 ST Microelectronics
|
||||
* Author: Vincenzo Frascino <vincenzo.frascino@st.com>
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/platform_data/spear_thermal.h>
|
||||
#include <linux/thermal.h>
|
||||
|
||||
#define MD_FACTOR 1000
|
||||
|
||||
/* SPEAr Thermal Sensor Dev Structure */
|
||||
struct spear_thermal_dev {
|
||||
/* pointer to base address of the thermal sensor */
|
||||
void __iomem *thermal_base;
|
||||
/* clk structure */
|
||||
struct clk *clk;
|
||||
/* pointer to thermal flags */
|
||||
unsigned int flags;
|
||||
};
|
||||
|
||||
static inline int thermal_get_temp(struct thermal_zone_device *thermal,
|
||||
unsigned long *temp)
|
||||
{
|
||||
struct spear_thermal_dev *stdev = thermal->devdata;
|
||||
|
||||
/*
|
||||
* Data are ready to be read after 628 usec from POWERDOWN signal
|
||||
* (PDN) = 1
|
||||
*/
|
||||
*temp = (readl_relaxed(stdev->thermal_base) & 0x7F) * MD_FACTOR;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct thermal_zone_device_ops ops = {
|
||||
.get_temp = thermal_get_temp,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static int spear_thermal_suspend(struct device *dev)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
|
||||
struct spear_thermal_dev *stdev = spear_thermal->devdata;
|
||||
unsigned int actual_mask = 0;
|
||||
|
||||
/* Disable SPEAr Thermal Sensor */
|
||||
actual_mask = readl_relaxed(stdev->thermal_base);
|
||||
writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
|
||||
|
||||
clk_disable(stdev->clk);
|
||||
dev_info(dev, "Suspended.\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int spear_thermal_resume(struct device *dev)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
|
||||
struct spear_thermal_dev *stdev = spear_thermal->devdata;
|
||||
unsigned int actual_mask = 0;
|
||||
int ret = 0;
|
||||
|
||||
ret = clk_enable(stdev->clk);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Can't enable clock\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Enable SPEAr Thermal Sensor */
|
||||
actual_mask = readl_relaxed(stdev->thermal_base);
|
||||
writel_relaxed(actual_mask | stdev->flags, stdev->thermal_base);
|
||||
|
||||
dev_info(dev, "Resumed.\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
|
||||
spear_thermal_resume);
|
||||
|
||||
static int spear_thermal_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct thermal_zone_device *spear_thermal = NULL;
|
||||
struct spear_thermal_dev *stdev;
|
||||
struct spear_thermal_pdata *pdata;
|
||||
int ret = 0;
|
||||
struct resource *stres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
|
||||
if (!stres) {
|
||||
dev_err(&pdev->dev, "memory resource missing\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pdata = dev_get_platdata(&pdev->dev);
|
||||
if (!pdata) {
|
||||
dev_err(&pdev->dev, "platform data is NULL\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
stdev = devm_kzalloc(&pdev->dev, sizeof(*stdev), GFP_KERNEL);
|
||||
if (!stdev) {
|
||||
dev_err(&pdev->dev, "kzalloc fail\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Enable thermal sensor */
|
||||
stdev->thermal_base = devm_ioremap(&pdev->dev, stres->start,
|
||||
resource_size(stres));
|
||||
if (!stdev->thermal_base) {
|
||||
dev_err(&pdev->dev, "ioremap failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
stdev->clk = clk_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(stdev->clk)) {
|
||||
dev_err(&pdev->dev, "Can't get clock\n");
|
||||
return PTR_ERR(stdev->clk);
|
||||
}
|
||||
|
||||
ret = clk_enable(stdev->clk);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Can't enable clock\n");
|
||||
goto put_clk;
|
||||
}
|
||||
|
||||
stdev->flags = pdata->thermal_flags;
|
||||
writel_relaxed(stdev->flags, stdev->thermal_base);
|
||||
|
||||
spear_thermal = thermal_zone_device_register("spear_thermal", 0,
|
||||
stdev, &ops, 0, 0, 0, 0);
|
||||
if (IS_ERR(spear_thermal)) {
|
||||
dev_err(&pdev->dev, "thermal zone device is NULL\n");
|
||||
ret = PTR_ERR(spear_thermal);
|
||||
goto disable_clk;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, spear_thermal);
|
||||
|
||||
dev_info(&spear_thermal->device, "Thermal Sensor Loaded at: 0x%p.\n",
|
||||
stdev->thermal_base);
|
||||
|
||||
return 0;
|
||||
|
||||
disable_clk:
|
||||
clk_disable(stdev->clk);
|
||||
put_clk:
|
||||
clk_put(stdev->clk);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int spear_thermal_exit(struct platform_device *pdev)
|
||||
{
|
||||
unsigned int actual_mask = 0;
|
||||
struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
|
||||
struct spear_thermal_dev *stdev = spear_thermal->devdata;
|
||||
|
||||
thermal_zone_device_unregister(spear_thermal);
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
|
||||
/* Disable SPEAr Thermal Sensor */
|
||||
actual_mask = readl_relaxed(stdev->thermal_base);
|
||||
writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
|
||||
|
||||
clk_disable(stdev->clk);
|
||||
clk_put(stdev->clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver spear_thermal_driver = {
|
||||
.probe = spear_thermal_probe,
|
||||
.remove = spear_thermal_exit,
|
||||
.driver = {
|
||||
.name = "spear_thermal",
|
||||
.owner = THIS_MODULE,
|
||||
.pm = &spear_thermal_pm_ops,
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(spear_thermal_driver);
|
||||
|
||||
MODULE_AUTHOR("Vincenzo Frascino <vincenzo.frascino@st.com>");
|
||||
MODULE_DESCRIPTION("SPEAr thermal driver");
|
||||
MODULE_LICENSE("GPL");
|
|
@ -23,6 +23,8 @@
|
|||
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
|
@ -39,8 +41,6 @@ MODULE_AUTHOR("Zhang Rui");
|
|||
MODULE_DESCRIPTION("Generic thermal management sysfs support");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
#define PREFIX "Thermal: "
|
||||
|
||||
struct thermal_cooling_device_instance {
|
||||
int id;
|
||||
char name[THERMAL_NAME_LENGTH];
|
||||
|
@ -60,13 +60,11 @@ static LIST_HEAD(thermal_tz_list);
|
|||
static LIST_HEAD(thermal_cdev_list);
|
||||
static DEFINE_MUTEX(thermal_list_lock);
|
||||
|
||||
static unsigned int thermal_event_seqnum;
|
||||
|
||||
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
|
||||
{
|
||||
int err;
|
||||
|
||||
again:
|
||||
again:
|
||||
if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -152,9 +150,9 @@ mode_store(struct device *dev, struct device_attribute *attr,
|
|||
if (!tz->ops->set_mode)
|
||||
return -EPERM;
|
||||
|
||||
if (!strncmp(buf, "enabled", sizeof("enabled")))
|
||||
if (!strncmp(buf, "enabled", sizeof("enabled") - 1))
|
||||
result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED);
|
||||
else if (!strncmp(buf, "disabled", sizeof("disabled")))
|
||||
else if (!strncmp(buf, "disabled", sizeof("disabled") - 1))
|
||||
result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED);
|
||||
else
|
||||
result = -EINVAL;
|
||||
|
@ -283,8 +281,7 @@ passive_show(struct device *dev, struct device_attribute *attr,
|
|||
static DEVICE_ATTR(type, 0444, type_show, NULL);
|
||||
static DEVICE_ATTR(temp, 0444, temp_show, NULL);
|
||||
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
|
||||
static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, \
|
||||
passive_store);
|
||||
static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
|
||||
|
||||
static struct device_attribute trip_point_attrs[] = {
|
||||
__ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL),
|
||||
|
@ -313,22 +310,6 @@ static struct device_attribute trip_point_attrs[] = {
|
|||
__ATTR(trip_point_11_temp, 0444, trip_point_temp_show, NULL),
|
||||
};
|
||||
|
||||
#define TRIP_POINT_ATTR_ADD(_dev, _index, result) \
|
||||
do { \
|
||||
result = device_create_file(_dev, \
|
||||
&trip_point_attrs[_index * 2]); \
|
||||
if (result) \
|
||||
break; \
|
||||
result = device_create_file(_dev, \
|
||||
&trip_point_attrs[_index * 2 + 1]); \
|
||||
} while (0)
|
||||
|
||||
#define TRIP_POINT_ATTR_REMOVE(_dev, _index) \
|
||||
do { \
|
||||
device_remove_file(_dev, &trip_point_attrs[_index * 2]); \
|
||||
device_remove_file(_dev, &trip_point_attrs[_index * 2 + 1]); \
|
||||
} while (0)
|
||||
|
||||
/* sys I/F for cooling device */
|
||||
#define to_cooling_device(_dev) \
|
||||
container_of(_dev, struct thermal_cooling_device, device)
|
||||
|
@ -835,15 +816,14 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
|
|||
return 0;
|
||||
|
||||
device_remove_file(&tz->device, &dev->attr);
|
||||
remove_symbol_link:
|
||||
remove_symbol_link:
|
||||
sysfs_remove_link(&tz->device.kobj, dev->name);
|
||||
release_idr:
|
||||
release_idr:
|
||||
release_idr(&tz->idr, &tz->lock, dev->id);
|
||||
free_mem:
|
||||
free_mem:
|
||||
kfree(dev);
|
||||
return result;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(thermal_zone_bind_cooling_device);
|
||||
|
||||
/**
|
||||
|
@ -873,14 +853,13 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
|
|||
|
||||
return -ENODEV;
|
||||
|
||||
unbind:
|
||||
unbind:
|
||||
device_remove_file(&tz->device, &pos->attr);
|
||||
sysfs_remove_link(&tz->device.kobj, pos->name);
|
||||
release_idr(&tz->idr, &tz->lock, pos->id);
|
||||
kfree(pos);
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(thermal_zone_unbind_cooling_device);
|
||||
|
||||
static void thermal_release(struct device *dev)
|
||||
|
@ -888,7 +867,8 @@ static void thermal_release(struct device *dev)
|
|||
struct thermal_zone_device *tz;
|
||||
struct thermal_cooling_device *cdev;
|
||||
|
||||
if (!strncmp(dev_name(dev), "thermal_zone", sizeof "thermal_zone" - 1)) {
|
||||
if (!strncmp(dev_name(dev), "thermal_zone",
|
||||
sizeof("thermal_zone") - 1)) {
|
||||
tz = to_thermal_zone(dev);
|
||||
kfree(tz);
|
||||
} else {
|
||||
|
@ -908,8 +888,9 @@ static struct class thermal_class = {
|
|||
* @devdata: device private data.
|
||||
* @ops: standard thermal cooling devices callbacks.
|
||||
*/
|
||||
struct thermal_cooling_device *thermal_cooling_device_register(
|
||||
char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
|
||||
struct thermal_cooling_device *
|
||||
thermal_cooling_device_register(char *type, void *devdata,
|
||||
const struct thermal_cooling_device_ops *ops)
|
||||
{
|
||||
struct thermal_cooling_device *cdev;
|
||||
struct thermal_zone_device *pos;
|
||||
|
@ -974,12 +955,11 @@ struct thermal_cooling_device *thermal_cooling_device_register(
|
|||
if (!result)
|
||||
return cdev;
|
||||
|
||||
unregister:
|
||||
unregister:
|
||||
release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
|
||||
device_unregister(&cdev->device);
|
||||
return ERR_PTR(result);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(thermal_cooling_device_register);
|
||||
|
||||
/**
|
||||
|
@ -1024,7 +1004,6 @@ void thermal_cooling_device_unregister(struct
|
|||
device_unregister(&cdev->device);
|
||||
return;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(thermal_cooling_device_unregister);
|
||||
|
||||
/**
|
||||
|
@ -1044,8 +1023,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
|
|||
|
||||
if (tz->ops->get_temp(tz, &temp)) {
|
||||
/* get_temp failed - retry it later */
|
||||
printk(KERN_WARNING PREFIX "failed to read out thermal zone "
|
||||
"%d\n", tz->id);
|
||||
pr_warn("failed to read out thermal zone %d\n", tz->id);
|
||||
goto leave;
|
||||
}
|
||||
|
||||
|
@ -1060,9 +1038,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
|
|||
ret = tz->ops->notify(tz, count,
|
||||
trip_type);
|
||||
if (!ret) {
|
||||
printk(KERN_EMERG
|
||||
"Critical temperature reached (%ld C), shutting down.\n",
|
||||
temp/1000);
|
||||
pr_emerg("Critical temperature reached (%ld C), shutting down\n",
|
||||
temp/1000);
|
||||
orderly_poweroff(true);
|
||||
}
|
||||
}
|
||||
|
@ -1100,7 +1077,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
|
|||
|
||||
tz->last_temperature = temp;
|
||||
|
||||
leave:
|
||||
leave:
|
||||
if (tz->passive)
|
||||
thermal_zone_device_set_polling(tz, tz->passive_delay);
|
||||
else if (tz->polling_delay)
|
||||
|
@ -1199,7 +1176,12 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
|
|||
}
|
||||
|
||||
for (count = 0; count < trips; count++) {
|
||||
TRIP_POINT_ATTR_ADD(&tz->device, count, result);
|
||||
result = device_create_file(&tz->device,
|
||||
&trip_point_attrs[count * 2]);
|
||||
if (result)
|
||||
break;
|
||||
result = device_create_file(&tz->device,
|
||||
&trip_point_attrs[count * 2 + 1]);
|
||||
if (result)
|
||||
goto unregister;
|
||||
tz->ops->get_trip_type(tz, count, &trip_type);
|
||||
|
@ -1235,12 +1217,11 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
|
|||
if (!result)
|
||||
return tz;
|
||||
|
||||
unregister:
|
||||
unregister:
|
||||
release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
|
||||
device_unregister(&tz->device);
|
||||
return ERR_PTR(result);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(thermal_zone_device_register);
|
||||
|
||||
/**
|
||||
|
@ -1279,9 +1260,12 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
|
|||
if (tz->ops->get_mode)
|
||||
device_remove_file(&tz->device, &dev_attr_mode);
|
||||
|
||||
for (count = 0; count < tz->trips; count++)
|
||||
TRIP_POINT_ATTR_REMOVE(&tz->device, count);
|
||||
|
||||
for (count = 0; count < tz->trips; count++) {
|
||||
device_remove_file(&tz->device,
|
||||
&trip_point_attrs[count * 2]);
|
||||
device_remove_file(&tz->device,
|
||||
&trip_point_attrs[count * 2 + 1]);
|
||||
}
|
||||
thermal_remove_hwmon_sysfs(tz);
|
||||
release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
|
||||
idr_destroy(&tz->idr);
|
||||
|
@ -1289,7 +1273,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
|
|||
device_unregister(&tz->device);
|
||||
return;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(thermal_zone_device_unregister);
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
|
@ -1312,10 +1295,11 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
|
|||
void *msg_header;
|
||||
int size;
|
||||
int result;
|
||||
static unsigned int thermal_event_seqnum;
|
||||
|
||||
/* allocate memory */
|
||||
size = nla_total_size(sizeof(struct thermal_genl_event)) + \
|
||||
nla_total_size(0);
|
||||
size = nla_total_size(sizeof(struct thermal_genl_event)) +
|
||||
nla_total_size(0);
|
||||
|
||||
skb = genlmsg_new(size, GFP_ATOMIC);
|
||||
if (!skb)
|
||||
|
@ -1331,8 +1315,8 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
|
|||
}
|
||||
|
||||
/* fill the data */
|
||||
attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \
|
||||
sizeof(struct thermal_genl_event));
|
||||
attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT,
|
||||
sizeof(struct thermal_genl_event));
|
||||
|
||||
if (!attr) {
|
||||
nlmsg_free(skb);
|
||||
|
@ -1359,7 +1343,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
|
|||
|
||||
result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
|
||||
if (result)
|
||||
printk(KERN_INFO "failed to send netlink event:%d", result);
|
||||
pr_info("failed to send netlink event:%d\n", result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
|
|
@ -85,6 +85,23 @@
|
|||
*/
|
||||
#define ACPI_CHECKSUM_ABORT FALSE
|
||||
|
||||
/*
|
||||
* Generate a version of ACPICA that only supports "reduced hardware"
|
||||
* platforms (as defined in ACPI 5.0). Set to TRUE to generate a specialized
|
||||
* version of ACPICA that ONLY supports the ACPI 5.0 "reduced hardware"
|
||||
* model. In other words, no ACPI hardware is supported.
|
||||
*
|
||||
* If TRUE, this means no support for the following:
|
||||
* PM Event and Control registers
|
||||
* SCI interrupt (and handler)
|
||||
* Fixed Events
|
||||
* General Purpose Events (GPEs)
|
||||
* Global Lock
|
||||
* ACPI PM timer
|
||||
* FACS table (Waking vectors and Global Lock)
|
||||
*/
|
||||
#define ACPI_REDUCED_HARDWARE FALSE
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
* Subsystem Constants
|
||||
|
@ -93,7 +110,7 @@
|
|||
|
||||
/* Version of ACPI supported */
|
||||
|
||||
#define ACPI_CA_SUPPORT_LEVEL 3
|
||||
#define ACPI_CA_SUPPORT_LEVEL 5
|
||||
|
||||
/* Maximum count for a semaphore object */
|
||||
|
|
@ -57,6 +57,7 @@
|
|||
#define ACPI_SUCCESS(a) (!(a))
|
||||
#define ACPI_FAILURE(a) (a)
|
||||
|
||||
#define ACPI_SKIP(a) (a == AE_CTRL_SKIP)
|
||||
#define AE_OK (acpi_status) 0x0000
|
||||
|
||||
/*
|
||||
|
@ -89,8 +90,9 @@
|
|||
#define AE_SAME_HANDLER (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL)
|
||||
#define AE_NO_HANDLER (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
|
||||
#define AE_OWNER_ID_LIMIT (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL)
|
||||
#define AE_NOT_CONFIGURED (acpi_status) (0x001C | AE_CODE_ENVIRONMENTAL)
|
||||
|
||||
#define AE_CODE_ENV_MAX 0x001B
|
||||
#define AE_CODE_ENV_MAX 0x001C
|
||||
|
||||
/*
|
||||
* Programmer exceptions
|
||||
|
@ -213,7 +215,8 @@ char const *acpi_gbl_exception_names_env[] = {
|
|||
"AE_ABORT_METHOD",
|
||||
"AE_SAME_HANDLER",
|
||||
"AE_NO_HANDLER",
|
||||
"AE_OWNER_ID_LIMIT"
|
||||
"AE_OWNER_ID_LIMIT",
|
||||
"AE_NOT_CONFIGURED"
|
||||
};
|
||||
|
||||
char const *acpi_gbl_exception_names_pgm[] = {
|
||||
|
|
|
@ -46,6 +46,7 @@
|
|||
|
||||
/* Method names - these methods can appear anywhere in the namespace */
|
||||
|
||||
#define METHOD_NAME__SB_ "_SB_"
|
||||
#define METHOD_NAME__HID "_HID"
|
||||
#define METHOD_NAME__CID "_CID"
|
||||
#define METHOD_NAME__UID "_UID"
|
||||
|
@ -64,11 +65,11 @@
|
|||
|
||||
/* Method names - these methods must appear at the namespace root */
|
||||
|
||||
#define METHOD_NAME__BFS "\\_BFS"
|
||||
#define METHOD_NAME__GTS "\\_GTS"
|
||||
#define METHOD_NAME__PTS "\\_PTS"
|
||||
#define METHOD_NAME__SST "\\_SI._SST"
|
||||
#define METHOD_NAME__WAK "\\_WAK"
|
||||
#define METHOD_PATHNAME__BFS "\\_BFS"
|
||||
#define METHOD_PATHNAME__GTS "\\_GTS"
|
||||
#define METHOD_PATHNAME__PTS "\\_PTS"
|
||||
#define METHOD_PATHNAME__SST "\\_SI._SST"
|
||||
#define METHOD_PATHNAME__WAK "\\_WAK"
|
||||
|
||||
/* Definitions of the predefined namespace names */
|
||||
|
||||
|
@ -79,6 +80,5 @@
|
|||
#define ACPI_PREFIX_LOWER (u32) 0x69706361 /* "acpi" */
|
||||
|
||||
#define ACPI_NS_ROOT_PATH "\\"
|
||||
#define ACPI_NS_SYSTEM_BUS "_SB_"
|
||||
|
||||
#endif /* __ACNAMES_H__ */
|
||||
|
|
|
@ -323,6 +323,8 @@ int acpi_bus_set_power(acpi_handle handle, int state);
|
|||
int acpi_bus_update_power(acpi_handle handle, int *state_p);
|
||||
bool acpi_bus_power_manageable(acpi_handle handle);
|
||||
bool acpi_bus_can_wakeup(acpi_handle handle);
|
||||
int acpi_power_resource_register_device(struct device *dev, acpi_handle handle);
|
||||
void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle);
|
||||
#ifdef CONFIG_ACPI_PROC_EVENT
|
||||
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
|
||||
int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
|
||||
|
@ -392,8 +394,13 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
int acpi_pm_device_run_wake(struct device *, bool);
|
||||
int acpi_pm_device_sleep_wake(struct device *, bool);
|
||||
#else
|
||||
static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
|
||||
{
|
||||
return -ENODEV;
|
||||
|
|
|
@ -95,6 +95,11 @@ acpi_status
|
|||
acpi_os_table_override(struct acpi_table_header *existing_table,
|
||||
struct acpi_table_header **new_table);
|
||||
|
||||
acpi_status
|
||||
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
|
||||
acpi_physical_address * new_address,
|
||||
u32 *new_table_length);
|
||||
|
||||
/*
|
||||
* Spinlock primitives
|
||||
*/
|
||||
|
@ -217,14 +222,10 @@ acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
|
|||
* Platform and hardware-independent physical memory interfaces
|
||||
*/
|
||||
acpi_status
|
||||
acpi_os_read_memory(acpi_physical_address address, u32 * value, u32 width);
|
||||
acpi_status
|
||||
acpi_os_read_memory64(acpi_physical_address address, u64 *value, u32 width);
|
||||
acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width);
|
||||
|
||||
acpi_status
|
||||
acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width);
|
||||
acpi_status
|
||||
acpi_os_write_memory64(acpi_physical_address address, u64 value, u32 width);
|
||||
acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width);
|
||||
|
||||
/*
|
||||
* Platform and hardware-independent PCI configuration space access
|
||||
|
|
|
@ -47,8 +47,9 @@
|
|||
|
||||
/* Current ACPICA subsystem version in YYYYMMDD format */
|
||||
|
||||
#define ACPI_CA_VERSION 0x20120111
|
||||
#define ACPI_CA_VERSION 0x20120320
|
||||
|
||||
#include "acconfig.h"
|
||||
#include "actypes.h"
|
||||
#include "actbl.h"
|
||||
|
||||
|
@ -71,6 +72,33 @@ extern u8 acpi_gbl_copy_dsdt_locally;
|
|||
extern u8 acpi_gbl_truncate_io_addresses;
|
||||
extern u8 acpi_gbl_disable_auto_repair;
|
||||
|
||||
/*
|
||||
* Hardware-reduced prototypes. All interfaces that use these macros will
|
||||
* be configured out of the ACPICA build if the ACPI_REDUCED_HARDWARE flag
|
||||
* is set to TRUE.
|
||||
*/
|
||||
#if (!ACPI_REDUCED_HARDWARE)
|
||||
#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \
|
||||
prototype;
|
||||
|
||||
#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
|
||||
prototype;
|
||||
|
||||
#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
|
||||
prototype;
|
||||
|
||||
#else
|
||||
#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \
|
||||
static ACPI_INLINE prototype {return(AE_NOT_CONFIGURED);}
|
||||
|
||||
#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
|
||||
static ACPI_INLINE prototype {return(AE_OK);}
|
||||
|
||||
#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
|
||||
static ACPI_INLINE prototype {}
|
||||
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
extern u32 acpi_current_gpe_count;
|
||||
extern struct acpi_table_fadt acpi_gbl_FADT;
|
||||
extern u8 acpi_gbl_system_awake_and_running;
|
||||
|
@@ -96,9 +124,8 @@ acpi_status acpi_terminate(void);
acpi_status acpi_subsystem_status(void);
#endif

acpi_status acpi_enable(void);

acpi_status acpi_disable(void);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void))

#ifdef ACPI_FUTURE_USAGE
acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer);

@@ -235,17 +262,34 @@ acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle);
acpi_status
acpi_install_initialization_handler(acpi_init_handler handler, u32 function);

acpi_status
acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler,
				  void *context);

acpi_status
acpi_install_fixed_event_handler(u32 acpi_event,
				 acpi_event_handler handler, void *context);

acpi_status
acpi_remove_fixed_event_handler(u32 acpi_event, acpi_event_handler handler);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_install_global_event_handler
				(ACPI_GBL_EVENT_HANDLER handler, void *context))

ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_install_fixed_event_handler(u32 acpi_event,
								 acpi_event_handler handler,
								 void *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_remove_fixed_event_handler(u32 acpi_event,
								acpi_event_handler handler))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_install_gpe_handler(acpi_handle gpe_device,
							 u32 gpe_number,
							 u32 type,
							 acpi_gpe_handler address,
							 void *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_remove_gpe_handler(acpi_handle gpe_device,
							u32 gpe_number,
							acpi_gpe_handler address))
acpi_status
acpi_install_notify_handler(acpi_handle device,
			    u32 handler_type,

@@ -266,15 +310,6 @@ acpi_remove_address_space_handler(acpi_handle device,
				  acpi_adr_space_type space_id,
				  acpi_adr_space_handler handler);

acpi_status
acpi_install_gpe_handler(acpi_handle gpe_device,
			 u32 gpe_number,
			 u32 type, acpi_gpe_handler address, void *context);

acpi_status
acpi_remove_gpe_handler(acpi_handle gpe_device,
			u32 gpe_number, acpi_gpe_handler address);

#ifdef ACPI_FUTURE_USAGE
acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
#endif

@@ -284,9 +319,11 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler);
/*
 * Global Lock interfaces
 */
acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle);

acpi_status acpi_release_global_lock(u32 handle);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_acquire_global_lock(u16 timeout,
							 u32 *handle))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_release_global_lock(u32 handle))

/*
 * Interfaces to AML mutex objects

@@ -299,47 +336,75 @@ acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname);
/*
 * Fixed Event interfaces
 */
acpi_status acpi_enable_event(u32 event, u32 flags);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_enable_event(u32 event, u32 flags))

acpi_status acpi_disable_event(u32 event, u32 flags);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_disable_event(u32 event, u32 flags))

acpi_status acpi_clear_event(u32 event);

acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_clear_event(u32 event))

ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_get_event_status(u32 event,
						      acpi_event_status *event_status))
/*
 * General Purpose Event (GPE) Interfaces
 */
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_update_all_gpes(void))

acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_enable_gpe(acpi_handle gpe_device,
						u32 gpe_number))

acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_disable_gpe(acpi_handle gpe_device,
						 u32 gpe_number))

acpi_status
acpi_setup_gpe_for_wake(acpi_handle parent_device,
			acpi_handle gpe_device, u32 gpe_number);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_clear_gpe(acpi_handle gpe_device,
					       u32 gpe_number))

acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_set_gpe(acpi_handle gpe_device,
					     u32 gpe_number, u8 action))

acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
		    u32 gpe_number, acpi_event_status *event_status);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_finish_gpe(acpi_handle gpe_device,
						u32 gpe_number))

acpi_status acpi_disable_all_gpes(void);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_setup_gpe_for_wake(acpi_handle parent_device,
							acpi_handle gpe_device,
							u32 gpe_number))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_set_gpe_wake_mask(acpi_handle gpe_device,
						       u32 gpe_number,
						       u8 action))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_get_gpe_status(acpi_handle gpe_device,
						    u32 gpe_number,
						    acpi_event_status *event_status))

acpi_status acpi_enable_all_runtime_gpes(void);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))

acpi_status acpi_get_gpe_device(u32 gpe_index, acpi_handle *gpe_device);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))

acpi_status
acpi_install_gpe_block(acpi_handle gpe_device,
		       struct acpi_generic_address *gpe_block_address,
		       u32 register_count, u32 interrupt_number);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_get_gpe_device(u32 gpe_index,
						    acpi_handle * gpe_device))

acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);

acpi_status acpi_update_all_gpes(void);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_install_gpe_block(acpi_handle gpe_device,
						       struct acpi_generic_address *gpe_block_address,
						       u32 register_count,
						       u32 interrupt_number))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_remove_gpe_block(acpi_handle gpe_device))

/*
 * Resource interfaces

@@ -391,33 +456,59 @@ acpi_buffer_to_resource(u8 *aml_buffer,
 */
acpi_status acpi_reset(void);

acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_read_bit_register(u32 register_id,
						       u32 *return_value))

acpi_status acpi_write_bit_register(u32 register_id, u32 value);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_write_bit_register(u32 register_id,
							u32 value))

acpi_status acpi_set_firmware_waking_vector(u32 physical_address);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_set_firmware_waking_vector(u32 physical_address))

#if ACPI_MACHINE_WIDTH == 64
acpi_status acpi_set_firmware_waking_vector64(u64 physical_address);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_set_firmware_waking_vector64(u64 physical_address))
#endif

acpi_status acpi_read(u64 *value, struct acpi_generic_address *reg);

acpi_status acpi_write(u64 value, struct acpi_generic_address *reg);

/*
 * Sleep/Wake interfaces
 */
acpi_status
acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);

acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);

acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags);

acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))

acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags);

acpi_status acpi_leave_sleep_state(u8 sleep_state);

/*
 * ACPI Timer interfaces
 */
#ifdef ACPI_FUTURE_USAGE
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_get_timer_resolution(u32 *resolution))

ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_timer(u32 *ticks))

ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
				acpi_get_timer_duration(u32 start_ticks,
							u32 end_ticks,
							u32 *time_elapsed))
#endif	/* ACPI_FUTURE_USAGE */

/*
 * Error/Warning output
 */

@@ -309,6 +309,13 @@ enum acpi_prefered_pm_profiles {
	PM_TABLET = 8
};

/* Values for sleep_status and sleep_control registers (V5 FADT) */

#define ACPI_X_WAKE_STATUS		0x80
#define ACPI_X_SLEEP_TYPE_MASK		0x1C
#define ACPI_X_SLEEP_TYPE_POSITION	0x02
#define ACPI_X_SLEEP_ENABLE		0x20

/* Reset to default packing */

#pragma pack()

@@ -517,6 +517,13 @@ typedef u64 acpi_integer;
#define ACPI_SLEEP_TYPE_MAX		0x7
#define ACPI_SLEEP_TYPE_INVALID		0xFF

/*
 * Sleep/Wake flags
 */
#define ACPI_NO_OPTIONAL_METHODS	0x00	/* Do not execute any optional methods */
#define ACPI_EXECUTE_GTS		0x01	/* For enter sleep interface */
#define ACPI_EXECUTE_BFS		0x02	/* For leave sleep prep interface */

/*
 * Standard notify values
 */

@@ -532,8 +539,9 @@ typedef u64 acpi_integer;
#define ACPI_NOTIFY_DEVICE_PLD_CHECK	(u8) 0x09
#define ACPI_NOTIFY_RESERVED		(u8) 0x0A
#define ACPI_NOTIFY_LOCALITY_UPDATE	(u8) 0x0B
#define ACPI_NOTIFY_SHUTDOWN_REQUEST	(u8) 0x0C

#define ACPI_NOTIFY_MAX			0x0B
#define ACPI_NOTIFY_MAX			0x0C

/*
 * Types associated with ACPI names and objects. The first group of

@@ -698,7 +706,8 @@ typedef u32 acpi_event_status;
#define ACPI_ALL_NOTIFY			(ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY)
#define ACPI_MAX_NOTIFY_HANDLER_TYPE	0x3

#define ACPI_MAX_SYS_NOTIFY		0x7f
#define ACPI_MAX_SYS_NOTIFY		0x7F
#define ACPI_MAX_DEVICE_SPECIFIC_NOTIFY	0xBF

/* Address Space (Operation Region) Types */

@@ -786,6 +795,15 @@ typedef u8 acpi_adr_space_type;
#define ACPI_ENABLE_EVENT	1
#define ACPI_DISABLE_EVENT	0

/* Sleep function dispatch */

typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags);

struct acpi_sleep_functions {
	ACPI_SLEEP_FUNCTION legacy_function;
	ACPI_SLEEP_FUNCTION extended_function;
};

/*
 * External ACPI object definition
 */

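The ACPI_SLEEP_FUNCTION typedef and struct acpi_sleep_functions pair a
legacy (PM1x control register) handler with an ACPI 5.0 extended
(sleep_control/sleep_status register) handler for each sleep entry point,
and the new ACPI_EXECUTE_GTS/ACPI_EXECUTE_BFS flags are passed through to
whichever handler runs. A rough sketch of the dispatch pattern this
enables; the function name is hypothetical, this is not the ACPICA
implementation, and it assumes a runtime flag such as
acpi_gbl_reduced_hardware is available to choose the path:

/* Sketch only: illustrates the legacy/extended selection idea. */
static acpi_status
example_sleep_dispatch(struct acpi_sleep_functions *funcs,
		       u8 sleep_state, u8 flags)
{
	/*
	 * Assumption: hardware-reduced platforms must take the extended
	 * path, since the legacy PM1x registers may not exist there.
	 */
	if (acpi_gbl_reduced_hardware)
		return (funcs->extended_function(sleep_state, flags));

	return (funcs->legacy_function(sleep_state, flags));
}
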
@@ -372,4 +372,14 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),

#endif	/* !CONFIG_ACPI */

#ifdef CONFIG_ACPI
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
					   u32 pm1a_ctrl, u32 pm1b_ctrl));

acpi_status acpi_os_prepare_sleep(u8 sleep_state,
				  u32 pm1a_control, u32 pm1b_control);
#else
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
#endif

#endif	/*_LINUX_ACPI_H*/

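acpi_os_set_prepare_sleep() lets platform code register a hook that sees
the resolved sleep state and the PM1A/PM1B control values just before they
are written, which is what a paravirtualized back end needs in order to
hand the actual sleep entry to its hypervisor or firmware. A hedged
registration sketch; the callback and init function names are made up:

#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Sketch only: 'my_prepare_sleep' is a hypothetical platform hook. */
static int my_prepare_sleep(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)
{
	/* e.g. forward the transition to a hypervisor or firmware here */
	pr_info("preparing S%u (pm1a=%#x pm1b=%#x)\n",
		sleep_state, pm1a_ctrl, pm1b_ctrl);
	return 0;	/* assumption: 0 lets the normal ACPICA path continue */
}

static int __init my_platform_pm_init(void)
{
	acpi_os_set_prepare_sleep(my_prepare_sleep);
	return 0;
}
arch_initcall(my_platform_pm_init);
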
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/completion.h>
#include <linux/hrtimer.h>

#define CPUIDLE_STATE_MAX	8
#define CPUIDLE_NAME_LEN	16

@@ -43,12 +44,15 @@ struct cpuidle_state {

	unsigned int	flags;
	unsigned int	exit_latency; /* in US */
	unsigned int	power_usage; /* in mW */
	int		power_usage; /* in mW */
	unsigned int	target_residency; /* in US */
	unsigned int	disable;

	int (*enter)	(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv,
			 int index);

	int (*enter_dead) (struct cpuidle_device *dev, int index);
};

/* Idle State Flags */

@@ -96,7 +100,6 @@ struct cpuidle_device {
	struct list_head	device_list;
	struct kobject		kobj;
	struct completion	kobj_unregister;
	void			*governor_data;
};

DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

@@ -118,10 +121,12 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
 ****************************/

struct cpuidle_driver {
	char			name[CPUIDLE_NAME_LEN];
	const char		*name;
	struct module		*owner;

	unsigned int		power_specified:1;
	/* set to 1 to use the core cpuidle time keeping (for all states). */
	unsigned int		en_core_tk_irqen:1;
	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
	int			state_count;
	int			safe_state_index;

@@ -140,6 +145,11 @@ extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_wrap_enter(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index,
			      int (*enter)(struct cpuidle_device *dev,
					   struct cpuidle_driver *drv, int index));
extern int cpuidle_play_dead(void);

#else
static inline void disable_cpuidle(void) { }

@@ -157,6 +167,12 @@ static inline void cpuidle_resume_and_unlock(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_wrap_enter(struct cpuidle_device *dev,
				     struct cpuidle_driver *drv, int index,
				     int (*enter)(struct cpuidle_device *dev,
						  struct cpuidle_driver *drv, int index))
{ return -ENODEV; }
static inline int cpuidle_play_dead(void) {return -ENODEV; }

#endif

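The new en_core_tk_irqen driver flag and the cpuidle_wrap_enter() helper
move idle-time accounting out of the individual drivers: when the flag is
set, the core measures residency around the state's ->enter() callback and
re-enables interrupts afterwards, so the callback can simply return the
entered state index. A minimal, hypothetical driver fragment written under
that assumption (the names and the ARM-style cpu_do_idle() call are
illustrative only):

#include <linux/cpuidle.h>
#include <linux/module.h>

/* Sketch only: 'my_idle_enter' and 'my_idle_driver' are made-up names. */
static int my_idle_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();		/* platform low-power entry (assumed helper) */
	return index;		/* core time keeping fills in last_residency */
}

static struct cpuidle_driver my_idle_driver = {
	.name			= "my_idle",
	.owner			= THIS_MODULE,
	/* let the cpuidle core do time keeping and IRQ re-enabling */
	.en_core_tk_irqen	= 1,
	.states[0] = {
		.name			= "WFI",
		.exit_latency		= 1,
		.target_residency	= 1,
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.enter			= my_idle_enter,
	},
	.state_count = 1,
};
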
@@ -0,0 +1,26 @@
/*
 * SPEAr thermal driver platform data.
 *
 * Copyright (C) 2011-2012 ST Microelectronics
 * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#ifndef SPEAR_THERMAL_H
#define SPEAR_THERMAL_H

/* SPEAr Thermal Sensor Platform Data */
struct spear_thermal_pdata {
	/* flags used to enable thermal sensor */
	unsigned int thermal_flags;
};

#endif /* SPEAR_THERMAL_H */
Some files were not shown because too many files have changed in this diff.