-rw-r--r--  drivers/cpuidle/cpuidle.c          38
-rw-r--r--  drivers/cpuidle/governors/menu.c    4
-rw-r--r--  include/linux/cpuidle.h             4
-rw-r--r--  kernel/sched/idle.c               114
4 files changed, 93 insertions, 67 deletions
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 61c417b..7f1b8f5 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -65,7 +65,7 @@ int cpuidle_play_dead(void)
return -ENODEV;
/* Find lowest-power state that supports long-term idle */
- for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
+ for (i = drv->state_count - 1; i >= 0; i--)
if (drv->states[i].enter_dead)
return drv->states[i].enter_dead(dev, i);
@@ -73,16 +73,21 @@ int cpuidle_play_dead(void)
}
static int find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev, bool freeze)
+ struct cpuidle_device *dev,
+ unsigned int max_latency,
+ unsigned int forbidden_flags,
+ bool freeze)
{
unsigned int latency_req = 0;
- int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
+ int i, ret = -ENXIO;
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+ for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
struct cpuidle_state_usage *su = &dev->states_usage[i];
if (s->disabled || su->disable || s->exit_latency <= latency_req
+ || s->exit_latency > max_latency
+ || (s->flags & forbidden_flags)
|| (freeze && !s->enter_freeze))
continue;
@@ -100,7 +105,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
- return find_deepest_state(drv, dev, false);
+ return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}
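
The reworked find_deepest_state() above replaces the hard-coded CPUIDLE_DRIVER_STATE_START starting point with explicit constraints: a latency ceiling and a set of forbidden state flags. For orientation, the three call patterns used in this diff (collected from the hunks above and below as a reading aid, not additional patch code):

    /* deepest state, no constraints */
    find_deepest_state(drv, dev, UINT_MAX, 0, false);

    /* deepest state usable for suspend-to-idle ("freeze") */
    find_deepest_state(drv, dev, UINT_MAX, 0, true);

    /* fallback when tick_broadcast_enter() fails: latency no worse than
     * the originally chosen state, and the local timer must keep running */
    find_deepest_state(drv, dev, target_state->exit_latency,
                       CPUIDLE_FLAG_TIMER_STOP, false);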
static void enter_freeze_proper(struct cpuidle_driver *drv,
@@ -139,7 +144,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely.
*/
- index = find_deepest_state(drv, dev, true);
+ index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
if (index >= 0)
enter_freeze_proper(drv, dev, index);
@@ -150,7 +155,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* cpuidle_enter_state - enter the state and update stats
* @dev: cpuidle device for this cpu
* @drv: cpuidle driver for this cpu
- * @next_state: index into drv->states of the state to enter
+ * @index: index into the states table in @drv of the state to enter
*/
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
int index)
@@ -167,8 +172,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* local timer will be shut down. If a local timer is used from another
* CPU as a broadcast timer, this call may fail if it is not available.
*/
- if (broadcast && tick_broadcast_enter())
- return -EBUSY;
+ if (broadcast && tick_broadcast_enter()) {
+ index = find_deepest_state(drv, dev, target_state->exit_latency,
+ CPUIDLE_FLAG_TIMER_STOP, false);
+ if (index < 0) {
+ default_idle_call();
+ return -EBUSY;
+ }
+ target_state = &drv->states[index];
+ }
+
+ /* Take note of the planned idle state. */
+ sched_idle_set_state(target_state);
trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = ktime_get();
@@ -178,6 +193,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_end = ktime_get();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+ /* The cpu is no longer idle or about to enter idle. */
+ sched_idle_set_state(NULL);
+
if (broadcast) {
if (WARN_ON_ONCE(!irqs_disabled()))
local_irq_disable();
@@ -249,7 +267,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
- if (cpuidle_curr_governor->reflect)
+ if (cpuidle_curr_governor->reflect && index >= 0)
cpuidle_curr_governor->reflect(dev, index);
}
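
The key behavioral change in this file is the tick_broadcast_enter() failure path of cpuidle_enter_state(): rather than returning -EBUSY immediately, the core now retries with the deepest state that fits within the latency of the originally chosen state and does not carry CPUIDLE_FLAG_TIMER_STOP, falling back to default_idle_call() only when no such state exists. A minimal standalone sketch of that selection rule, written as userspace C with made-up state values (not patch code):

    #include <stdio.h>

    #define FLAG_TIMER_STOP 0x1  /* stand-in for CPUIDLE_FLAG_TIMER_STOP */

    struct state { const char *name; unsigned int exit_latency; unsigned int flags; };

    static int find_fallback(const struct state *s, int count,
                             unsigned int max_latency, unsigned int forbidden)
    {
            unsigned int latency_req = 0;
            int i, ret = -1;  /* the kernel helper returns -ENXIO instead */

            for (i = 0; i < count; i++) {
                    if (s[i].exit_latency <= latency_req ||
                        s[i].exit_latency > max_latency ||
                        (s[i].flags & forbidden))
                            continue;
                    latency_req = s[i].exit_latency;
                    ret = i;
            }
            return ret;
    }

    int main(void)
    {
            struct state states[] = {
                    { "C1",  2, 0 },
                    { "C3", 50, 0 },
                    { "C6", 90, FLAG_TIMER_STOP },  /* needs broadcast timer */
            };
            /* The governor picked C6 (latency 90) but the broadcast timer is
             * unavailable, so pick the deepest timer-preserving state: C3. */
            int idx = find_fallback(states, 3, 90, FLAG_TIMER_STOP);
            printf("fallback state: %s\n", idx >= 0 ? states[idx].name : "none");
            return 0;
    }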
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b8a5fa1..22e4463 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -367,9 +367,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
static void menu_reflect(struct cpuidle_device *dev, int index)
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
+
data->last_state_idx = index;
- if (index >= 0)
- data->needs_update = 1;
+ data->needs_update = 1;
}
/**
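
Together with the cpuidle_reflect() hunk above, this moves the index validity check out of the menu governor and into the core, so a governor's ->reflect() callback now only ever sees states it actually selected. A hypothetical callback (illustration only; record_last_state() is an invented helper) can therefore rely on:

    static void example_reflect(struct cpuidle_device *dev, int index)
    {
            /* index >= 0 is guaranteed here: cpuidle_reflect() filters out
             * error values such as -EBUSY before calling the governor */
            record_last_state(dev, index);
    }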
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 9c5e892..c7a6364 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -200,6 +200,10 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
struct cpuidle_device *dev) {return NULL; }
#endif
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void default_idle_call(void);
+
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
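
Both helpers are implemented in kernel/sched/idle.c (see the diff below) but declared here so that the cpuidle core can call into the scheduler's idle loop. On the cpuidle side they bracket the actual state entry, as the cpuidle.c hunks above show, roughly:

    sched_idle_set_state(target_state);            /* note the planned state */
    entered_state = target_state->enter(dev, drv, index);
    sched_idle_set_state(NULL);                    /* CPU is no longer idle */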
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index fefcb1f..594275e 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -15,6 +15,15 @@
#include "sched.h"
+/**
+ * sched_idle_set_state - Record idle state for the current CPU.
+ * @idle_state: State to record.
+ */
+void sched_idle_set_state(struct cpuidle_state *idle_state)
+{
+ idle_set_state(this_rq(), idle_state);
+}
+
static int __read_mostly cpu_idle_force_poll;
void cpu_idle_poll_ctrl(bool enable)
@@ -68,6 +77,46 @@ void __weak arch_cpu_idle(void)
}
/**
+ * default_idle_call - Default CPU idle routine.
+ *
+ * To use when the cpuidle framework cannot be used.
+ */
+void default_idle_call(void)
+{
+ if (current_clr_polling_and_test())
+ local_irq_enable();
+ else
+ arch_cpu_idle();
+}
+
+static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ int next_state)
+{
+ /* Fall back to the default arch idle method on errors. */
+ if (next_state < 0) {
+ default_idle_call();
+ return next_state;
+ }
+
+ /*
+ * The idle task must be scheduled, it is pointless to go to idle, just
+ * update no idle residency and return.
+ */
+ if (current_clr_polling_and_test()) {
+ dev->last_residency = 0;
+ local_irq_enable();
+ return -EBUSY;
+ }
+
+ /*
+ * Enter the idle state previously returned by the governor decision.
+ * This function will block until an interrupt occurs and will take
+ * care of re-enabling the local interrupts
+ */
+ return cpuidle_enter(drv, dev, next_state);
+}
+
+/**
* cpuidle_idle_call - the main idle function
*
* NOTE: no locks or semaphores should be used here
@@ -81,7 +130,6 @@ static void cpuidle_idle_call(void)
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int next_state, entered_state;
- bool reflect;
/*
* Check if the idle task must be rescheduled. If it is the
@@ -105,8 +153,10 @@ static void cpuidle_idle_call(void)
*/
rcu_idle_enter();
- if (cpuidle_not_available(drv, dev))
- goto use_default;
+ if (cpuidle_not_available(drv, dev)) {
+ default_idle_call();
+ goto exit_idle;
+ }
/*
* Suspend-to-idle ("freeze") is a system state in which all user space
@@ -124,52 +174,19 @@ static void cpuidle_idle_call(void)
goto exit_idle;
}
- reflect = false;
next_state = cpuidle_find_deepest_state(drv, dev);
+ call_cpuidle(drv, dev, next_state);
} else {
- reflect = true;
/*
* Ask the cpuidle framework to choose a convenient idle state.
*/
next_state = cpuidle_select(drv, dev);
- }
- /* Fall back to the default arch idle method on errors. */
- if (next_state < 0)
- goto use_default;
-
- /*
- * The idle task must be scheduled, it is pointless to
- * go to idle, just update no idle residency and get
- * out of this function
- */
- if (current_clr_polling_and_test()) {
- dev->last_residency = 0;
- entered_state = next_state;
- local_irq_enable();
- goto exit_idle;
- }
-
- /* Take note of the planned idle state. */
- idle_set_state(this_rq(), &drv->states[next_state]);
-
- /*
- * Enter the idle state previously returned by the governor decision.
- * This function will block until an interrupt occurs and will take
- * care of re-enabling the local interrupts
- */
- entered_state = cpuidle_enter(drv, dev, next_state);
-
- /* The cpu is no longer idle or about to enter idle. */
- idle_set_state(this_rq(), NULL);
-
- if (entered_state == -EBUSY)
- goto use_default;
-
- /*
- * Give the governor an opportunity to reflect on the outcome
- */
- if (reflect)
+ entered_state = call_cpuidle(drv, dev, next_state);
+ /*
+ * Give the governor an opportunity to reflect on the outcome
+ */
cpuidle_reflect(dev, entered_state);
+ }
exit_idle:
__current_set_polling();
@@ -182,19 +199,6 @@ exit_idle:
rcu_idle_exit();
start_critical_timings();
- return;
-
-use_default:
- /*
- * We can't use the cpuidle framework, let's use the default
- * idle routine.
- */
- if (current_clr_polling_and_test())
- local_irq_enable();
- else
- arch_cpu_idle();
-
- goto exit_idle;
}
DEFINE_PER_CPU(bool, cpu_dead_idle);
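
Net effect on this file: the use_default label and the reflect flag are gone, and every path funnels through call_cpuidle() or default_idle_call(). Reconstructed from the hunks above, the core of cpuidle_idle_call() now reads roughly as follows (abbreviated sketch, not verbatim patch code):

    if (cpuidle_not_available(drv, dev)) {
            default_idle_call();
            goto exit_idle;
    }

    if (idle_should_freeze()) {
            entered_state = cpuidle_enter_freeze(drv, dev);
            if (entered_state > 0) {
                    local_irq_enable();
                    goto exit_idle;
            }
            /* suspend-to-idle: deepest state, no governor involved,
             * hence nothing to reflect on */
            next_state = cpuidle_find_deepest_state(drv, dev);
            call_cpuidle(drv, dev, next_state);
    } else {
            next_state = cpuidle_select(drv, dev);
            entered_state = call_cpuidle(drv, dev, next_state);
            cpuidle_reflect(dev, entered_state);  /* ignores index < 0 */
    }

    exit_idle:
            __current_set_polling();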