author     Linus Torvalds <torvalds@linux-foundation.org>   2015-02-13 13:45:57 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-02-13 13:45:57 -0800
commit     18320f2a6871aaf2522f793fee4a67eccf5e131a (patch)
tree       bb900ce16b590a5e1ae8fcff78e5677e99fcbd35 /drivers
parent     db3ecdee1cf0538f11832f7ef66945c4dd903918 (diff)
parent     c7fb90dfbef49b03e6f3fd6a32338e59cbcf34ee (diff)
Merge tag 'pm+acpi-3.20-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull more ACPI and power management updates from Rafael Wysocki:
 "These are two reverts related to system suspend breakage caused by one
  of the recent commits, a fix for a recently introduced bug in devfreq
  and a bunch of other things that didn't make it into my previous pull
  request, but otherwise are ready to go.

  Specifics:

   - Revert two ACPI EC driver commits, one that broke system suspend
     on Acer Aspire S5 and one that depends on it (Rafael J Wysocki).

   - Fix a typo leading to an incorrect check in the exynos-ppmu
     devfreq driver (Dan Carpenter).

   - Add support for one more Broadwell CPU model to intel_idle (Len
     Brown).

   - Fix an obscure problem with state transitions related to
     interrupts in the speedstep-smi cpufreq driver (Mikulas Patocka).

   - Remove some unnecessary messages related to the "out of memory"
     condition from the core PM code (Quentin Lambert).

   - Update turbostat parameters and documentation, add support for one
     more Broadwell CPU model to it and modify it to skip printing
     disabled package C-states (Len Brown)"

* tag 'pm+acpi-3.20-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / devfreq: event: testing the wrong variable
  cpufreq: speedstep-smi: enable interrupts when waiting
  PM / OPP / clk: Remove unnecessary OOM message
  Revert "ACPI / EC: Add query flushing support"
  Revert "ACPI / EC: Add GPE reference counting debugging messages"
  tools/power turbostat: support additional Broadwell model
  intel_idle: support additional Broadwell model
  tools/power turbostat: update parameters, documentation
  tools/power turbostat: Skip printing disabled package C-states
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ec.c                    | 125
-rw-r--r--  drivers/base/firmware_class.c        |   1
-rw-r--r--  drivers/base/power/clock_ops.c       |   4
-rw-r--r--  drivers/base/power/opp.c             |   8
-rw-r--r--  drivers/cpufreq/speedstep-lib.c      |   3
-rw-r--r--  drivers/cpufreq/speedstep-smi.c      |  12
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c  |   4
-rw-r--r--  drivers/idle/intel_idle.c            |   1
8 files changed, 39 insertions(+), 119 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 14d0c89..982b67f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -31,7 +31,6 @@
/* Uncomment next line to get verbose printout */
/* #define DEBUG */
-#define DEBUG_REF 0
#define pr_fmt(fmt) "ACPI : EC: " fmt
#include <linux/kernel.h>
@@ -77,9 +76,7 @@ enum ec_command {
* when trying to clear the EC */
enum {
- EC_FLAGS_EVENT_ENABLED, /* Event is enabled */
- EC_FLAGS_EVENT_PENDING, /* Event is pending */
- EC_FLAGS_EVENT_DETECTED, /* Event is detected */
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and
* OpReg are installed */
EC_FLAGS_STARTED, /* Driver is started */
@@ -91,13 +88,6 @@ enum {
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
-#define ec_debug_ref(ec, fmt, ...) \
- do { \
- if (DEBUG_REF) \
- pr_debug("%lu: " fmt, ec->reference_count, \
- ## __VA_ARGS__); \
- } while (0)
-
/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
@@ -161,12 +151,6 @@ static bool acpi_ec_flushed(struct acpi_ec *ec)
return ec->reference_count == 1;
}
-static bool acpi_ec_has_pending_event(struct acpi_ec *ec)
-{
- return test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
- test_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
-}
-
/* --------------------------------------------------------------------------
* EC Registers
* -------------------------------------------------------------------------- */
@@ -334,97 +318,34 @@ static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
* the flush operation is not in
* progress
* @ec: the EC device
- * @allow_event: whether event should be handled
*
* This function must be used before taking a new action that should hold
* the reference count. If this function returns false, then the action
* must be discarded or it will prevent the flush operation from being
* completed.
- *
- * During flushing, QR_EC command need to pass this check when there is a
- * pending event, so that the reference count held for the pending event
- * can be decreased by the completion of the QR_EC command.
*/
-static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec,
- bool allow_event)
+static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
- if (!acpi_ec_started(ec)) {
- if (!allow_event || !acpi_ec_has_pending_event(ec))
- return false;
- }
+ if (!acpi_ec_started(ec))
+ return false;
acpi_ec_submit_request(ec);
return true;
}
-static void acpi_ec_submit_event(struct acpi_ec *ec)
+static void acpi_ec_submit_query(struct acpi_ec *ec)
{
- if (!test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
- !test_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags))
- return;
- /* Hold reference for pending event */
- if (!acpi_ec_submit_flushable_request(ec, true))
- return;
- ec_debug_ref(ec, "Increase event\n");
- if (!test_and_set_bit(EC_FLAGS_EVENT_PENDING, &ec->flags)) {
- pr_debug("***** Event query started *****\n");
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
+ pr_debug("***** Event started *****\n");
schedule_work(&ec->work);
- return;
}
- acpi_ec_complete_request(ec);
- ec_debug_ref(ec, "Decrease event\n");
}
-static void acpi_ec_complete_event(struct acpi_ec *ec)
+static void acpi_ec_complete_query(struct acpi_ec *ec)
{
if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
- clear_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
- pr_debug("***** Event query stopped *****\n");
- /* Unhold reference for pending event */
- acpi_ec_complete_request(ec);
- ec_debug_ref(ec, "Decrease event\n");
- /* Check if there is another SCI_EVT detected */
- acpi_ec_submit_event(ec);
- }
-}
-
-static void acpi_ec_submit_detection(struct acpi_ec *ec)
-{
- /* Hold reference for query submission */
- if (!acpi_ec_submit_flushable_request(ec, false))
- return;
- ec_debug_ref(ec, "Increase query\n");
- if (!test_and_set_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags)) {
- pr_debug("***** Event detection blocked *****\n");
- acpi_ec_submit_event(ec);
- return;
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+ pr_debug("***** Event stopped *****\n");
}
- acpi_ec_complete_request(ec);
- ec_debug_ref(ec, "Decrease query\n");
-}
-
-static void acpi_ec_complete_detection(struct acpi_ec *ec)
-{
- if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
- clear_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags);
- pr_debug("***** Event detetion unblocked *****\n");
- /* Unhold reference for query submission */
- acpi_ec_complete_request(ec);
- ec_debug_ref(ec, "Decrease query\n");
- }
-}
-
-static void acpi_ec_enable_event(struct acpi_ec *ec)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ec->lock, flags);
- set_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags);
- /*
- * An event may be pending even with SCI_EVT=0, so QR_EC should
- * always be issued right after started.
- */
- acpi_ec_submit_detection(ec);
- spin_unlock_irqrestore(&ec->lock, flags);
}
static int ec_transaction_completed(struct acpi_ec *ec)
@@ -468,7 +389,6 @@ static void advance_transaction(struct acpi_ec *ec)
t->rdata[t->ri++] = acpi_ec_read_data(ec);
if (t->rlen == t->ri) {
t->flags |= ACPI_EC_COMMAND_COMPLETE;
- acpi_ec_complete_event(ec);
if (t->command == ACPI_EC_COMMAND_QUERY)
pr_debug("***** Command(%s) hardware completion *****\n",
acpi_ec_cmd_string(t->command));
@@ -479,7 +399,6 @@ static void advance_transaction(struct acpi_ec *ec)
} else if (t->wlen == t->wi &&
(status & ACPI_EC_FLAG_IBF) == 0) {
t->flags |= ACPI_EC_COMMAND_COMPLETE;
- acpi_ec_complete_event(ec);
wakeup = true;
}
goto out;
@@ -488,17 +407,16 @@ static void advance_transaction(struct acpi_ec *ec)
!(status & ACPI_EC_FLAG_SCI) &&
(t->command == ACPI_EC_COMMAND_QUERY)) {
t->flags |= ACPI_EC_COMMAND_POLL;
- acpi_ec_complete_detection(ec);
+ acpi_ec_complete_query(ec);
t->rdata[t->ri++] = 0x00;
t->flags |= ACPI_EC_COMMAND_COMPLETE;
- acpi_ec_complete_event(ec);
pr_debug("***** Command(%s) software completion *****\n",
acpi_ec_cmd_string(t->command));
wakeup = true;
} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
acpi_ec_write_cmd(ec, t->command);
t->flags |= ACPI_EC_COMMAND_POLL;
- acpi_ec_complete_detection(ec);
+ acpi_ec_complete_query(ec);
} else
goto err;
goto out;
@@ -519,7 +437,7 @@ err:
}
out:
if (status & ACPI_EC_FLAG_SCI)
- acpi_ec_submit_detection(ec);
+ acpi_ec_submit_query(ec);
if (wakeup && in_interrupt())
wake_up(&ec->wait);
}
@@ -580,11 +498,10 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
/* start transaction */
spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
- if (!acpi_ec_submit_flushable_request(ec, true)) {
+ if (!acpi_ec_submit_flushable_request(ec)) {
ret = -EINVAL;
goto unlock;
}
- ec_debug_ref(ec, "Increase command\n");
/* following two actions should be kept atomic */
ec->curr = t;
pr_debug("***** Command(%s) started *****\n",
@@ -600,7 +517,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
acpi_ec_complete_request(ec);
- ec_debug_ref(ec, "Decrease command\n");
unlock:
spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
@@ -762,10 +678,8 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
pr_debug("+++++ Starting EC +++++\n");
/* Enable GPE for event processing (SCI_EVT=1) */
- if (!resuming) {
+ if (!resuming)
acpi_ec_submit_request(ec);
- ec_debug_ref(ec, "Increase driver\n");
- }
pr_info("+++++ EC started +++++\n");
}
spin_unlock_irqrestore(&ec->lock, flags);
@@ -794,10 +708,8 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
wait_event(ec->wait, acpi_ec_stopped(ec));
spin_lock_irqsave(&ec->lock, flags);
/* Disable GPE for event processing (SCI_EVT=1) */
- if (!suspending) {
+ if (!suspending)
acpi_ec_complete_request(ec);
- ec_debug_ref(ec, "Decrease driver\n");
- }
clear_bit(EC_FLAGS_STARTED, &ec->flags);
clear_bit(EC_FLAGS_STOPPED, &ec->flags);
pr_info("+++++ EC stopped +++++\n");
@@ -967,9 +879,7 @@ static void acpi_ec_gpe_poller(struct work_struct *work)
{
struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
- pr_debug("***** Event poller started *****\n");
acpi_ec_query(ec, NULL);
- pr_debug("***** Event poller stopped *****\n");
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -1039,6 +949,7 @@ static struct acpi_ec *make_acpi_ec(void)
if (!ec)
return NULL;
+ ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
mutex_init(&ec->mutex);
init_waitqueue_head(&ec->wait);
INIT_LIST_HEAD(&ec->list);
@@ -1189,7 +1100,7 @@ static int acpi_ec_add(struct acpi_device *device)
ret = ec_install_handlers(ec);
/* EC is fully operational, allow queries */
- acpi_ec_enable_event(ec);
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
/* Clear stale _Q events if hardware might require that */
if (EC_FLAGS_CLEAR_ON_RESUME)
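For orientation, the event-query path that the two EC reverts restore reduces to the shape below. This is a minimal sketch assembled from the '+' lines of the ec.c hunks above; the structure layout and the acpi_ec_complete_query() signature are abbreviated (the real function reads ec->curr->command), so it is not a drop-in piece of ec.c.

    /* Sketch of the restored (pre-"query flushing") event handling path. */
    #include <linux/bitops.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    #define ACPI_EC_COMMAND_QUERY	0x84	/* QR_EC */

    enum {
    	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
    	EC_FLAGS_HANDLERS_INSTALLED,
    	EC_FLAGS_STARTED,
    	EC_FLAGS_STOPPED,
    };

    struct acpi_ec_sketch {
    	unsigned long flags;
    	struct work_struct work;	/* queued work runs acpi_ec_query() */
    };

    /* GPE handler side: at most one query worker is scheduled at a time. */
    static void acpi_ec_submit_query(struct acpi_ec_sketch *ec)
    {
    	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
    		schedule_work(&ec->work);
    }

    /* Transaction side: a QR_EC command that reached the EC re-arms the flag. */
    static void acpi_ec_complete_query(struct acpi_ec_sketch *ec, u8 command)
    {
    	if (command == ACPI_EC_COMMAND_QUERY)
    		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
    }
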
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 58470c3..c3293f0 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -855,7 +855,6 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
if (!fw_priv) {
- dev_err(device, "%s: kmalloc failed\n", __func__);
fw_priv = ERR_PTR(-ENOMEM);
goto exit;
}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index d626576..7fdd017 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -81,10 +81,8 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
- if (!ce) {
- dev_err(dev, "Not enough memory for clock entry.\n");
+ if (!ce)
return -ENOMEM;
- }
if (con_id) {
ce->con_id = kstrdup(con_id, GFP_KERNEL);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 15bf299..677fb28 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -474,10 +474,8 @@ static int _opp_add_dynamic(struct device *dev, unsigned long freq,
/* allocate new OPP node */
new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
- if (!new_opp) {
- dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
+ if (!new_opp)
return -ENOMEM;
- }
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
@@ -695,10 +693,8 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
/* keep the node allocated */
new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
- if (!new_opp) {
- dev_warn(dev, "%s: Unable to create OPP\n", __func__);
+ if (!new_opp)
return -ENOMEM;
- }
mutex_lock(&dev_opp_list_lock);
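The firmware_class.c, clock_ops.c and opp.c hunks apply the same cleanup: drop the per-call-site "out of memory" message, since a failing k*alloc() already emits a warning with a backtrace (unless __GFP_NOWARN is passed), so the extra dev_err()/dev_warn() adds nothing. A minimal sketch of the resulting idiom, using a hypothetical entry type in place of pm_clock_entry / dev_pm_opp:

    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Hypothetical entry type, standing in for the structures allocated above. */
    struct example_entry {
    	const char *con_id;
    	unsigned long rate;
    };

    static int example_add_entry(const char *con_id)
    {
    	struct example_entry *ce;

    	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
    	if (!ce)
    		return -ENOMEM;	/* no extra message: the allocator already warned */

    	ce->con_id = con_id;
    	/* ... link the entry into the list, as __pm_clk_add() does ... */
    	return 0;
    }
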
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 7047821..4ab7a21 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
pr_debug("previous speed is %u\n", prev_speed);
+ preempt_disable();
local_irq_save(flags);
/* switch to low state */
@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
out:
local_irq_restore(flags);
+ preempt_enable();
+
return ret;
}
EXPORT_SYMBOL_GPL(speedstep_get_freqs);
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 5fc96d5..819229e 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state)
return;
/* Disable IRQs */
+ preempt_disable();
local_irq_save(flags);
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
@@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state)
do {
if (retry) {
+ /*
+ * We need to enable interrupts, otherwise the blockage
+ * won't resolve.
+ *
+ * We disable preemption so that other processes don't
+ * run. If other processes were running, they could
+ * submit more DMA requests, making the blockage worse.
+ */
pr_debug("retry %u, previous result %u, waiting...\n",
retry, result);
+ local_irq_enable();
mdelay(retry * 50);
+ local_irq_disable();
}
retry++;
__asm__ __volatile__(
@@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state)
/* enable IRQs */
local_irq_restore(flags);
+ preempt_enable();
if (new_state == state)
pr_debug("change to %u MHz succeeded after %u tries "
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 135be0a..ad83473 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -327,8 +327,8 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
for (i = 0; i < info->num_events; i++) {
edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
- if (IS_ERR(edev)) {
- ret = PTR_ERR(edev);
+ if (IS_ERR(edev[i])) {
+ ret = PTR_ERR(edev[i]);
dev_err(&pdev->dev,
"failed to add devfreq-event device\n");
goto err;
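The devfreq fix above corrects a classic slip: edev is the (valid) array pointer, so IS_ERR(edev) could never catch a failure returned for an individual element by devm_devfreq_event_add_edev(). A minimal sketch of the corrected loop, with the parameters mirroring the locals of exynos_ppmu_probe():

    #include <linux/devfreq-event.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_add_events(struct platform_device *pdev,
    			      struct devfreq_event_dev **edev,
    			      struct devfreq_event_desc *desc,
    			      int num_events)
    {
    	int i;

    	for (i = 0; i < num_events; i++) {
    		edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
    		if (IS_ERR(edev[i]))	/* test the element, not the array pointer */
    			return PTR_ERR(edev[i]);
    	}
    	return 0;
    }
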
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 9cceacb..1bc0c170 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -727,6 +727,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x46, idle_cpu_hsw),
ICPU(0x4d, idle_cpu_avn),
ICPU(0x3d, idle_cpu_bdw),
+ ICPU(0x47, idle_cpu_bdw),
ICPU(0x4f, idle_cpu_bdw),
ICPU(0x56, idle_cpu_bdw),
{}