@@ -374,17 +374,12 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
 
 u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
 {
-	acpi_status status;
 	struct acpi_gpe_block_info *gpe_block;
 	struct acpi_namespace_node *gpe_device;
 	struct acpi_gpe_register_info *gpe_register_info;
 	struct acpi_gpe_event_info *gpe_event_info;
 	u32 gpe_number;
-	struct acpi_gpe_handler_info *gpe_handler_info;
 	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
-	u8 enabled_status_byte;
-	u64 status_reg;
-	u64 enable_reg;
 	acpi_cpu_flags flags;
 	u32 i;
 	u32 j;
@@ -441,49 +436,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
 				continue;
 			}
 
-			/* Read the Status Register */
-
-			status =
-			    acpi_hw_read(&status_reg,
-					 &gpe_register_info->status_address);
-			if (ACPI_FAILURE(status)) {
-				goto unlock_and_exit;
-			}
-
-			/* Read the Enable Register */
-
-			status =
-			    acpi_hw_read(&enable_reg,
-					 &gpe_register_info->enable_address);
-			if (ACPI_FAILURE(status)) {
-				goto unlock_and_exit;
-			}
-
-			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
-					  "Read registers for GPE %02X-%02X: Status=%02X, Enable=%02X, "
-					  "RunEnable=%02X, WakeEnable=%02X\n",
-					  gpe_register_info->base_gpe_number,
-					  gpe_register_info->base_gpe_number +
-					  (ACPI_GPE_REGISTER_WIDTH - 1),
-					  (u32)status_reg, (u32)enable_reg,
-					  gpe_register_info->enable_for_run,
-					  gpe_register_info->enable_for_wake));
-
-			/* Check if there is anything active at all in this register */
-
-			enabled_status_byte = (u8)(status_reg & enable_reg);
-			if (!enabled_status_byte) {
-
-				/* No active GPEs in this register, move on */
-
-				continue;
-			}
-
 			/* Now look at the individual GPEs in this byte register */
 
 			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
 
-				/* Examine one GPE bit */
+				/* Detect and dispatch one GPE bit */
 
 				gpe_event_info =
 				    &gpe_block->
@@ -491,71 +448,18 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
 						ACPI_GPE_REGISTER_WIDTH) + j];
 				gpe_number =
 				    j + gpe_register_info->base_gpe_number;
-
-				if (enabled_status_byte & (1 << j)) {
-
-					/* Invoke global event handler if present */
-
-					acpi_gpe_count++;
-					if (acpi_gbl_global_event_handler) {
-						acpi_gbl_global_event_handler
-						    (ACPI_EVENT_TYPE_GPE,
-						     gpe_device, gpe_number,
-						     acpi_gbl_global_event_handler_context);
-					}
-
-					/* Found an active GPE */
-
-					if (ACPI_GPE_DISPATCH_TYPE
-					    (gpe_event_info->flags) ==
-					    ACPI_GPE_DISPATCH_RAW_HANDLER) {
-
-						/* Dispatch the event to a raw handler */
-
-						gpe_handler_info =
-						    gpe_event_info->dispatch.
-						    handler;
-
-						/*
-						 * There is no protection around the namespace node
-						 * and the GPE handler to ensure a safe destruction
-						 * because:
-						 * 1. The namespace node is expected to always
-						 *    exist after loading a table.
-						 * 2. The GPE handler is expected to be flushed by
-						 *    acpi_os_wait_events_complete() before the
-						 *    destruction.
-						 */
-						acpi_os_release_lock
-						    (acpi_gbl_gpe_lock, flags);
-						int_status |=
-						    gpe_handler_info->
-						    address(gpe_device,
-							    gpe_number,
-							    gpe_handler_info->
-							    context);
-						flags =
-						    acpi_os_acquire_lock
-						    (acpi_gbl_gpe_lock);
-					} else {
-						/*
-						 * Dispatch the event to a standard handler or
-						 * method.
-						 */
-						int_status |=
-						    acpi_ev_gpe_dispatch
-						    (gpe_device, gpe_event_info,
-						     gpe_number);
-					}
-				}
+				acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+				int_status |=
+				    acpi_ev_detect_gpe(gpe_device,
+						       gpe_event_info,
+						       gpe_number);
+				flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 			}
 		}
 
 		gpe_block = gpe_block->next;
 	}
 
-unlock_and_exit:
-
 	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	return (int_status);
 }
@@ -726,6 +630,127 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
 }
 
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_detect_gpe
+ *
+ * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
+ *              gpe_event_info      - Info for this GPE
+ *              gpe_number          - Number relative to the parent GPE block
+ *
+ * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Detect and dispatch a General Purpose Event to either a function
+ *              (e.g. EC) or method (e.g. _Lxx/_Exx) handler.
+ *              NOTE: GPE is W1C, so it is possible to handle a single GPE from both
+ *                    task and irq context in parallel as long as the process to
+ *                    detect and mask the GPE is atomic.
+ *                    However the atomicity of ACPI_GPE_DISPATCH_RAW_HANDLER is
+ *                    dependent on the raw handler itself.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ev_detect_gpe(struct acpi_namespace_node *gpe_device,
+		   struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+{
+	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+	u8 enabled_status_byte;
+	u64 status_reg;
+	u64 enable_reg;
+	u32 register_bit;
+	struct acpi_gpe_register_info *gpe_register_info;
+	struct acpi_gpe_handler_info *gpe_handler_info;
+	acpi_cpu_flags flags;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_gpe_detect);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Get the info block for the entire GPE register */
+
+	gpe_register_info = gpe_event_info->register_info;
+
+	/* Get the register bitmask for this GPE */
+
+	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
+
+	/* GPE currently enabled (enable bit == 1)? */
+
+	status = acpi_hw_read(&enable_reg, &gpe_register_info->enable_address);
+	if (ACPI_FAILURE(status)) {
+		goto error_exit;
+	}
+
+	/* GPE currently active (status bit == 1)? */
+
+	status = acpi_hw_read(&status_reg, &gpe_register_info->status_address);
+	if (ACPI_FAILURE(status)) {
+		goto error_exit;
+	}
+
+	/* Check if there is anything active at all in this GPE */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+			  "Read registers for GPE %02X: Status=%02X, Enable=%02X, "
+			  "RunEnable=%02X, WakeEnable=%02X\n",
+			  gpe_number,
+			  (u32)(status_reg & register_bit),
+			  (u32)(enable_reg & register_bit),
+			  gpe_register_info->enable_for_run,
+			  gpe_register_info->enable_for_wake));
+
+	enabled_status_byte = (u8)(status_reg & enable_reg);
+	if (!(enabled_status_byte & register_bit)) {
+		goto error_exit;
+	}
+
+	/* Invoke global event handler if present */
+
+	acpi_gpe_count++;
+	if (acpi_gbl_global_event_handler) {
+		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE,
+					      gpe_device, gpe_number,
+					      acpi_gbl_global_event_handler_context);
+	}
+
+	/* Found an active GPE */
+
+	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+	    ACPI_GPE_DISPATCH_RAW_HANDLER) {
+
+		/* Dispatch the event to a raw handler */
+
+		gpe_handler_info = gpe_event_info->dispatch.handler;
+
+		/*
+		 * There is no protection around the namespace node
+		 * and the GPE handler to ensure a safe destruction
+		 * because:
+		 * 1. The namespace node is expected to always
+		 *    exist after loading a table.
+		 * 2. The GPE handler is expected to be flushed by
+		 *    acpi_os_wait_events_complete() before the
+		 *    destruction.
+		 */
+		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+		int_status |=
+		    gpe_handler_info->address(gpe_device, gpe_number,
+					      gpe_handler_info->context);
+		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+	} else {
+		/* Dispatch the event to a standard handler or method. */
+
+		int_status |= acpi_ev_gpe_dispatch(gpe_device,
+						   gpe_event_info, gpe_number);
+	}
+
+error_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return (int_status);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_gpe_dispatch
@@ -739,8 +764,6 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
  * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
  *              or method (e.g. _Lxx/_Exx) handler.
  *
- * This function executes at interrupt level.
- *
  ******************************************************************************/
 
 u32