Revert "ACPI / EC: Add query flushing support"

Revert commit f252cb09e1 (ACPI / EC: Add query flushing support),
because it breaks system suspend on Acer Aspire S5.  The machine
just hangs solid at the last stage of suspend (after taking non-boot
CPUs offline).

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Date:   2015-02-11 17:35:05 +01:00
parent e06bf91b59
commit 37d11391c2
1 changed file with 16 additions and 85 deletions

drivers/acpi/ec.c

@@ -76,9 +76,7 @@ enum ec_command {
                                         * when trying to clear the EC */
 
 enum {
-        EC_FLAGS_EVENT_ENABLED,         /* Event is enabled */
-        EC_FLAGS_EVENT_PENDING,         /* Event is pending */
-        EC_FLAGS_EVENT_DETECTED,        /* Event is detected */
+        EC_FLAGS_QUERY_PENDING,         /* Query is pending */
         EC_FLAGS_HANDLERS_INSTALLED,    /* Handlers for GPE and
                                          * OpReg are installed */
         EC_FLAGS_STARTED,               /* Driver is started */
@@ -153,12 +151,6 @@ static bool acpi_ec_flushed(struct acpi_ec *ec)
         return ec->reference_count == 1;
 }
 
-static bool acpi_ec_has_pending_event(struct acpi_ec *ec)
-{
-        return test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
-               test_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
-}
-
 /* --------------------------------------------------------------------------
  *                             EC Registers
  * -------------------------------------------------------------------------- */
@@ -326,93 +318,36 @@ static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
  *                                      the flush operation is not in
  *                                      progress
  * @ec: the EC device
- * @allow_event: whether event should be handled
  *
  * This function must be used before taking a new action that should hold
  * the reference count. If this function returns false, then the action
  * must be discarded or it will prevent the flush operation from being
  * completed.
- *
- * During flushing, QR_EC command need to pass this check when there is a
- * pending event, so that the reference count held for the pending event
- * can be decreased by the completion of the QR_EC command.
  */
-static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec,
-                                             bool allow_event)
+static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
 {
-        if (!acpi_ec_started(ec)) {
-                if (!allow_event || !acpi_ec_has_pending_event(ec))
-                        return false;
-        }
+        if (!acpi_ec_started(ec))
+                return false;
         acpi_ec_submit_request(ec);
         return true;
 }
 
-static void acpi_ec_submit_event(struct acpi_ec *ec)
+static void acpi_ec_submit_query(struct acpi_ec *ec)
 {
-        if (!test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
-            !test_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags))
-                return;
-        /* Hold reference for pending event */
-        if (!acpi_ec_submit_flushable_request(ec, true))
-                return;
-        if (!test_and_set_bit(EC_FLAGS_EVENT_PENDING, &ec->flags)) {
-                pr_debug("***** Event query started *****\n");
+        if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
+                pr_debug("***** Event started *****\n");
                 schedule_work(&ec->work);
-                return;
         }
-        acpi_ec_complete_request(ec);
 }
 
-static void acpi_ec_complete_event(struct acpi_ec *ec)
+static void acpi_ec_complete_query(struct acpi_ec *ec)
 {
         if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
-                clear_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
-                pr_debug("***** Event query stopped *****\n");
-                /* Unhold reference for pending event */
-                acpi_ec_complete_request(ec);
-                /* Check if there is another SCI_EVT detected */
-                acpi_ec_submit_event(ec);
+                clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+                pr_debug("***** Event stopped *****\n");
         }
 }
 
-static void acpi_ec_submit_detection(struct acpi_ec *ec)
-{
-        /* Hold reference for query submission */
-        if (!acpi_ec_submit_flushable_request(ec, false))
-                return;
-        if (!test_and_set_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags)) {
-                pr_debug("***** Event detection blocked *****\n");
-                acpi_ec_submit_event(ec);
-                return;
-        }
-        acpi_ec_complete_request(ec);
-}
-
-static void acpi_ec_complete_detection(struct acpi_ec *ec)
-{
-        if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
-                clear_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags);
-                pr_debug("***** Event detetion unblocked *****\n");
-                /* Unhold reference for query submission */
-                acpi_ec_complete_request(ec);
-        }
-}
-
-static void acpi_ec_enable_event(struct acpi_ec *ec)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&ec->lock, flags);
-        set_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags);
-        /*
-         * An event may be pending even with SCI_EVT=0, so QR_EC should
-         * always be issued right after started.
-         */
-        acpi_ec_submit_detection(ec);
-        spin_unlock_irqrestore(&ec->lock, flags);
-}
-
 static int ec_transaction_completed(struct acpi_ec *ec)
 {
         unsigned long flags;
@@ -454,7 +389,6 @@ static void advance_transaction(struct acpi_ec *ec)
                                 t->rdata[t->ri++] = acpi_ec_read_data(ec);
                                 if (t->rlen == t->ri) {
                                         t->flags |= ACPI_EC_COMMAND_COMPLETE;
-                                        acpi_ec_complete_event(ec);
                                         if (t->command == ACPI_EC_COMMAND_QUERY)
                                                 pr_debug("***** Command(%s) hardware completion *****\n",
                                                          acpi_ec_cmd_string(t->command));
@@ -465,7 +399,6 @@ static void advance_transaction(struct acpi_ec *ec)
                 } else if (t->wlen == t->wi &&
                            (status & ACPI_EC_FLAG_IBF) == 0) {
                         t->flags |= ACPI_EC_COMMAND_COMPLETE;
-                        acpi_ec_complete_event(ec);
                         wakeup = true;
                 }
                 goto out;
@@ -474,17 +407,16 @@ static void advance_transaction(struct acpi_ec *ec)
                     !(status & ACPI_EC_FLAG_SCI) &&
                     (t->command == ACPI_EC_COMMAND_QUERY)) {
                         t->flags |= ACPI_EC_COMMAND_POLL;
-                        acpi_ec_complete_detection(ec);
+                        acpi_ec_complete_query(ec);
                         t->rdata[t->ri++] = 0x00;
                         t->flags |= ACPI_EC_COMMAND_COMPLETE;
-                        acpi_ec_complete_event(ec);
                         pr_debug("***** Command(%s) software completion *****\n",
                                  acpi_ec_cmd_string(t->command));
                         wakeup = true;
                 } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
                         acpi_ec_write_cmd(ec, t->command);
                         t->flags |= ACPI_EC_COMMAND_POLL;
-                        acpi_ec_complete_detection(ec);
+                        acpi_ec_complete_query(ec);
                 } else
                         goto err;
                 goto out;
@@ -505,7 +437,7 @@ static void advance_transaction(struct acpi_ec *ec)
         }
 out:
         if (status & ACPI_EC_FLAG_SCI)
-                acpi_ec_submit_detection(ec);
+                acpi_ec_submit_query(ec);
         if (wakeup && in_interrupt())
                 wake_up(&ec->wait);
 }
@@ -566,7 +498,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
         /* start transaction */
         spin_lock_irqsave(&ec->lock, tmp);
         /* Enable GPE for command processing (IBF=0/OBF=1) */
-        if (!acpi_ec_submit_flushable_request(ec, true)) {
+        if (!acpi_ec_submit_flushable_request(ec)) {
                 ret = -EINVAL;
                 goto unlock;
         }
@@ -947,9 +879,7 @@ static void acpi_ec_gpe_poller(struct work_struct *work)
 {
         struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
 
-        pr_debug("***** Event poller started *****\n");
         acpi_ec_query(ec, NULL);
-        pr_debug("***** Event poller stopped *****\n");
 }
 
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -1019,6 +949,7 @@ static struct acpi_ec *make_acpi_ec(void)
 
         if (!ec)
                 return NULL;
+        ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
         mutex_init(&ec->mutex);
         init_waitqueue_head(&ec->wait);
         INIT_LIST_HEAD(&ec->list);
@@ -1169,7 +1100,7 @@ static int acpi_ec_add(struct acpi_device *device)
         ret = ec_install_handlers(ec);
 
         /* EC is fully operational, allow queries */
-        acpi_ec_enable_event(ec);
+        clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
 
         /* Clear stale _Q events if hardware might require that */
         if (EC_FLAGS_CLEAR_ON_RESUME)