author	Henrik Austad <henrik@austad.us>	2015-09-09 17:00:41 +0200
committer	Ingo Molnar <mingo@kernel.org>	2015-09-18 09:23:13 +0200
commit	20f9cd2acb1d74a8bf4b4087267f586e6ecdbc03 (patch)
tree	0fbd33d1065457c055134c5149bc2650491291a8 /kernel
parent	0c6a5b4319928b769ba81eff45bd679737a29ba1 (diff)
sched/core: Make policy-testing consistent
Most of the policy tests are done via the <class>_policy() helpers, with the notable exception of idle. A new valid_policy() wrapper has also been added to improve readability in set_load_weight().

This commit does not change the logical behavior of the scheduler core.

Signed-off-by: Henrik Austad <henrik@austad.us>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1441810841-4756-1-git-send-email-henrik@austad.us
Signed-off-by: Ingo Molnar <mingo@kernel.org>
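For readers skimming the diff, the standalone sketch below collects the consolidated policy tests in one place. It is an illustration only, not part of the patch: the SCHED_* values mirror include/uapi/linux/sched.h, the helper bodies mirror the kernel/sched/sched.h hunk further down, and the main() driver is purely hypothetical.

/*
 * Standalone userspace sketch of the policy-test helpers (illustration only;
 * the real helpers live in kernel/sched/sched.h, see the diff below).
 * SCHED_* values mirror include/uapi/linux/sched.h.
 */
#include <stdio.h>

#define SCHED_NORMAL	0
#define SCHED_FIFO	1
#define SCHED_RR	2
#define SCHED_BATCH	3
#define SCHED_IDLE	5
#define SCHED_DEADLINE	6

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

/* One helper replaces the open-coded six-way comparison removed in core.c. */
static inline int valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
	       rt_policy(policy) || dl_policy(policy);
}

int main(void)
{
	int policy;

	for (policy = 0; policy <= 6; policy++)
		printf("policy %d: %s\n", policy,
		       valid_policy(policy) ? "valid" : "invalid");

	return 0;
}

Compiled as plain C, this reports policies 0-3, 5 and 6 as valid and the reserved value 4 as invalid, which is the same set the removed open-coded check in the core.c hunk accepted.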
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	9
-rw-r--r--	kernel/sched/sched.h	9
2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6ab415a..1b30b5b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -817,7 +817,7 @@ static void set_load_weight(struct task_struct *p)
 	/*
 	 * SCHED_IDLE tasks get minimal weight:
 	 */
-	if (p->policy == SCHED_IDLE) {
+	if (idle_policy(p->policy)) {
 		load->weight = scale_load(WEIGHT_IDLEPRIO);
 		load->inv_weight = WMULT_IDLEPRIO;
 		return;
@@ -3733,10 +3733,7 @@ recheck:
 	} else {
 		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
-		if (policy != SCHED_DEADLINE &&
-		    policy != SCHED_FIFO && policy != SCHED_RR &&
-		    policy != SCHED_NORMAL && policy != SCHED_BATCH &&
-		    policy != SCHED_IDLE)
+		if (!valid_policy(policy))
 			return -EINVAL;
 	}
@@ -3792,7 +3789,7 @@ recheck:
 		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
 		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
 		 */
-		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
+		if (idle_policy(p->policy) && !idle_policy(policy)) {
 			if (!can_nice(p, task_nice(p)))
 				return -EPERM;
 		}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 167ab48..3845a71 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -84,6 +84,10 @@ static inline void update_cpu_load_active(struct rq *this_rq) { }
  */
 #define RUNTIME_INF	((u64)~0ULL)
+static inline int idle_policy(int policy)
+{
+	return policy == SCHED_IDLE;
+}
 static inline int fair_policy(int policy)
 {
 	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
@@ -98,6 +102,11 @@ static inline int dl_policy(int policy)
 {
 	return policy == SCHED_DEADLINE;
 }
+static inline bool valid_policy(int policy)
+{
+	return idle_policy(policy) || fair_policy(policy) ||
+	       rt_policy(policy) || dl_policy(policy);
+}
 static inline int task_has_rt_policy(struct task_struct *p)
 {