/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
 *
 * Name: acevents.h - Event subcomponent prototypes and defines
 *
 * Copyright (C) 2000 - 2022, Intel Corp.
 *
 *****************************************************************************/

#ifndef __ACEVENTS_H__
#define __ACEVENTS_H__

/*
 * Conditions that trigger a post-enabling GPE polling pass:
 * With some GPE hardware it is not sufficient to simply enable an
 * edge-triggered GPE; software needs to poll it once after enabling it.
 */
#ifdef ACPI_USE_GPE_POLLING
#define ACPI_GPE_IS_POLLING_NEEDED(__gpe__)             \
	((__gpe__)->runtime_count == 1 &&               \
	 (__gpe__)->flags & ACPI_GPE_INITIALIZED &&     \
	 ((__gpe__)->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_EDGE_TRIGGERED)
#else
#define ACPI_GPE_IS_POLLING_NEEDED(__gpe__)             FALSE
#endif
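
/*
 * Illustrative sketch (not one of the interfaces declared in this header):
 * after the first runtime enable of an edge-triggered GPE, a caller could
 * use the macro above to decide whether a single manual detection pass is
 * required, for example:
 *
 *	if (ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
 *		(void)acpi_ev_detect_gpe(gpe_device, gpe_event_info,
 *					 gpe_number);
 *	}
 *
 * acpi_ev_detect_gpe() is declared under "evgpe" below; the variable names
 * used here are hypothetical.
 */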

/*
 * evevent
 */
acpi_status acpi_ev_initialize_events(void);

acpi_status acpi_ev_install_xrupt_handlers(void);

u32 acpi_ev_fixed_event_detect(void);

/*
 * evmisc
 */
u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);

u32 acpi_ev_get_gpe_number_index(u32 gpe_number);

acpi_status
acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
			     u32 notify_value);

/*
 * evglock - Global Lock support
 */
acpi_status acpi_ev_init_global_lock_handler(void);

ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
			    acpi_ev_acquire_global_lock(u16 timeout))

ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
acpi_status acpi_ev_remove_global_lock_handler(void);

/*
 * evgpe - Low-level GPE support
 */
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info);

acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);

acpi_status
acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked);
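
/*
 * When clear_on_enable is TRUE, the status bit of the GPE is cleared before
 * the GPE is enabled for the first time (runtime_count 0 -> 1), so that
 * events signaled before the first direct enable cannot trigger stale
 * actions such as a spurious system resume (summarized from the changelog
 * that introduced the parameter).
 */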
acpi_status
acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
			  u8 clear_on_enable);

acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number);

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block);

acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info);
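
/*
 * Detects and dispatches a single GPE. Per the changelog of the parallel
 * GPE handling rework, the enable/status bits of only this GPE are read
 * and acknowledged atomically under the GPE lock, roughly:
 *
 *	READ (enable/status bits of this GPE)
 *	if (enabled && raised)
 *		CLEAR (enable bit)
 *		CLEAR (status bit, for an edge-triggered GPE)
 *	dispatch the GPE (installed handler or _Lxx/_Exx method)
 *
 * so each status assertion is dispatched at most once, whether detection
 * runs from the SCI interrupt path or from GPE polling.
 */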
u32
acpi_ev_detect_gpe(struct acpi_namespace_node *gpe_device,
		   struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number);

/*
 * evgpeblk - Upper-level GPE block support
 */
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 u64 address,
			 u8 space_id,
			 u32 register_count,
			 u16 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block);

acpi_status
acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			     struct acpi_gpe_block_info *gpe_block,
			     void *context);

ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
			    acpi_ev_delete_gpe_block(struct acpi_gpe_block_info
						     *gpe_block))

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info,
		     u32 gpe_number);

/*
 * evgpeinit - GPE initialization and update
 */
acpi_status acpi_ev_gpe_initialize(void);

ACPI_HW_DEPENDENT_RETURN_VOID(void
			      acpi_ev_update_gpes(acpi_owner_id table_owner_id))

acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
			 u32 level, void *context, void **return_value);

/*
 * evgpeutil - GPE utilities
 */
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);

acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
		       struct acpi_gpe_block_info *gpe_block, void *context);

acpi_status
acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
			    struct acpi_gpe_xrupt_info **gpe_xrupt_block);

acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			    struct acpi_gpe_block_info *gpe_block,
			    void *context);

/*
 * evhandler - Address space handling
 */
union acpi_operand_object *acpi_ev_find_region_handler(acpi_adr_space_type
						       space_id,
						       union acpi_operand_object
						       *handler_obj);

u8
acpi_ev_has_default_handler(struct acpi_namespace_node *node,
			    acpi_adr_space_type space_id);

acpi_status acpi_ev_install_region_handlers(void);

acpi_status
acpi_ev_install_space_handler(struct acpi_namespace_node *node,
			      acpi_adr_space_type space_id,
			      acpi_adr_space_handler handler,
			      acpi_adr_space_setup setup, void *context);

/*
 * evregion - Operation region support
 */
acpi_status acpi_ev_initialize_op_regions(void);

acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
			       union acpi_operand_object *field_obj,
			       u32 function,
			       u32 region_offset, u32 bit_width, u64 *value);

acpi_status
acpi_ev_attach_region(union acpi_operand_object *handler_obj,
		      union acpi_operand_object *region_obj,
		      u8 acpi_ns_is_locked);

void
acpi_ev_detach_region(union acpi_operand_object *region_obj,
		      u8 acpi_ns_is_locked);

void
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
			    acpi_adr_space_type space_id, u32 function);

acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);

/*
 * evregini - Region initialization and setup
 */
acpi_status
acpi_ev_system_memory_region_setup(acpi_handle handle,
				   u32 function,
				   void *handler_context,
				   void **region_context);

acpi_status
acpi_ev_io_space_region_setup(acpi_handle handle,
			      u32 function,
			      void *handler_context, void **region_context);

acpi_status
acpi_ev_pci_config_region_setup(acpi_handle handle,
				u32 function,
				void *handler_context, void **region_context);

acpi_status
acpi_ev_cmos_region_setup(acpi_handle handle,
			  u32 function,
			  void *handler_context, void **region_context);

acpi_status
acpi_ev_pci_bar_region_setup(acpi_handle handle,
			     u32 function,
			     void *handler_context, void **region_context);

acpi_status
acpi_ev_data_table_region_setup(acpi_handle handle,
				u32 function,
				void *handler_context, void **region_context);

acpi_status
acpi_ev_default_region_setup(acpi_handle handle,
			     u32 function,
			     void *handler_context, void **region_context);

acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj);

u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);

/*
 * evsci - SCI (System Control Interrupt) handling/dispatch
 */
u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);

u32 acpi_ev_sci_dispatch(void);

u32 acpi_ev_install_sci_handler(void);

acpi_status acpi_ev_remove_all_sci_handlers(void);

ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))

#endif				/* __ACEVENTS_H__ */