author    Ingo Molnar <mingo@elte.hu>  2008-01-25 21:08:16 +0100
committer Ingo Molnar <mingo@elte.hu>  2008-01-25 21:08:16 +0100
commit    00597c3ed78e424bdafff123565c078d8b6088cf (patch)
tree      9aa1df064152008969f6fa6eacec7f2b15110f75 /kernel/sched_rt.c
parent    6e1938d3ad58c940ec4119d387dd92a787cb238c (diff)
sched: remove leftover debugging

remove leftover debugging.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 8 --------
 1 file changed, 0 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index deff0c7..cc38521c 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -253,8 +253,6 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 	struct list_head *queue;
 	int idx;
 
-	assert_spin_locked(&rq->lock);
-
 	if (likely(rq->rt.rt_nr_running < 2))
 		return NULL;
 
@@ -500,8 +498,6 @@ static int push_rt_task(struct rq *rq)
 	int ret = 0;
 	int paranoid = RT_MAX_TRIES;
 
-	assert_spin_locked(&rq->lock);
-
 	if (!rq->rt.overloaded)
 		return 0;
 
@@ -546,8 +542,6 @@ static int push_rt_task(struct rq *rq)
 		goto out;
 	}
 
-	assert_spin_locked(&lowest_rq->lock);
-
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);
@@ -589,8 +583,6 @@ static int pull_rt_task(struct rq *this_rq)
 	int cpu;
 	int ret = 0;
 
-	assert_spin_locked(&this_rq->lock);
-
 	/*
 	 * If cpusets are used, and we have overlapping
 	 * run queue cpusets, then this algorithm may not catch all.
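
All four hunks delete the same leftover pattern: an assert_spin_locked() sanity check at the top of code paths that are only ever entered with the relevant runqueue lock already held. As a rough illustration of what was removed, a minimal sketch follows; the function name below is hypothetical and not code from this patch, and in kernels of this era assert_spin_locked() expanded to approximately BUG_ON(!spin_is_locked(lock)).

static int push_rt_task_checked(struct rq *rq)
{
	/* Debugging-only check: complain loudly if rq->lock is not held. */
	assert_spin_locked(&rq->lock);

	if (!rq->rt.overloaded)
		return 0;

	/* ... rest of push_rt_task() would follow unchanged ... */
	return 1;
}

Once the locking rules around these paths had settled, such checks add no value on a correct kernel and only cost cycles in hot scheduler code, which is presumably why they are dropped here as leftover debugging.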