author    Ingo Molnar <mingo@elte.hu>  2008-06-23 10:52:59 +0200
committer Ingo Molnar <mingo@elte.hu>  2008-06-23 10:52:59 +0200
commit    a60b33cf59d1c9e0e363287fce799cb23d45660c (patch)
tree      85eb8feea5717cf472a2549e53f59183a95617c5 /kernel
parent    0f476b6d91a1395bda6464e653ce66ea9bea7167 (diff)
parent    481c5346d0981940ee63037eb53e4e37b0735c10 (diff)
Merge branch 'linus' into core/softirq
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c       10
-rw-r--r--  kernel/rcupreempt.c    2
-rw-r--r--  kernel/sched.c        34
-rw-r--r--  kernel/sched_rt.c     63
-rw-r--r--  kernel/sched_stats.h   6
-rw-r--r--  kernel/softlockup.c   15
6 files changed, 91 insertions(+), 39 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 039baa4..9fceb97 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1037,8 +1037,8 @@ int current_cpuset_is_being_rebound(void)
static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
- if ((int)val < 0)
- val = -1;
+ if (val < -1 || val >= SD_LV_MAX)
+ return -EINVAL;
if (val != cs->relax_domain_level) {
cs->relax_domain_level = val;
@@ -1890,6 +1890,12 @@ static void common_cpu_mem_hotplug_unplug(void)
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
scan_for_empty_cpusets(&top_cpuset);
+ /*
+ * Scheduler destroys domains on hotplug events.
+ * Rebuild them based on the current settings.
+ */
+ rebuild_sched_domains();
+
cgroup_unlock();
}
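
The cpuset.c hunk above replaces silent clamping of negative values with an explicit range check: anything outside -1..SD_LV_MAX-1 is now rejected with -EINVAL instead of being folded to -1. The stand-alone C sketch below models just that check; the numeric value chosen for SD_LV_MAX and the stripped-down struct are illustrative stand-ins, not the kernel definitions.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's count of sched-domain levels (illustrative value). */
#define SD_LV_MAX 9

/* Simplified stand-in for struct cpuset: only the field the hunk touches. */
struct cpuset_stub {
	int64_t relax_domain_level;
};

/* Mirrors the new check: -1 means "use the default", 0..SD_LV_MAX-1 select a
 * level, anything else is rejected with -EINVAL instead of being clamped. */
static int update_relax_domain_level(struct cpuset_stub *cs, int64_t val)
{
	if (val < -1 || val >= SD_LV_MAX)
		return -EINVAL;
	if (val != cs->relax_domain_level)
		cs->relax_domain_level = val;
	return 0;
}

int main(void)
{
	struct cpuset_stub cs = { .relax_domain_level = -1 };

	printf("%d\n", update_relax_domain_level(&cs, 3));   /* 0: accepted   */
	printf("%d\n", update_relax_domain_level(&cs, -5));  /* -22: rejected */
	return 0;
}

The practical effect of the change is that a bogus write to the cpuset interface can now fail visibly rather than being silently accepted as -1.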
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 9dd827d..902dce2 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -217,8 +217,6 @@ long rcu_batches_completed(void)
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-
void __rcu_read_lock(void)
{
int idx;
diff --git a/kernel/sched.c b/kernel/sched.c
index 7c7b2d0..33680bc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1127,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+#ifdef CONFIG_SMP
static void hotplug_hrtick_disable(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -1182,6 +1183,7 @@ static void init_hrtick(void)
{
hotcpu_notifier(hotplug_hrtick, 0);
}
+#endif /* CONFIG_SMP */
static void init_rq_hrtick(struct rq *rq)
{
@@ -6877,7 +6879,12 @@ static int default_relax_domain_level = -1;
static int __init setup_relax_domain_level(char *str)
{
- default_relax_domain_level = simple_strtoul(str, NULL, 0);
+ unsigned long val;
+
+ val = simple_strtoul(str, NULL, 0);
+ if (val < SD_LV_MAX)
+ default_relax_domain_level = val;
+
return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);
@@ -7236,6 +7243,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
}
/*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+ ndoms_cur = 0;
+ if (doms_cur != &fallback_doms)
+ kfree(doms_cur);
+ doms_cur = &fallback_doms;
+}
+
+/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
* For now this just excludes isolated cpus, but could be used to
* exclude other special cases in the future.
@@ -7382,6 +7401,7 @@ int arch_reinit_sched_domains(void)
get_online_cpus();
mutex_lock(&sched_domains_mutex);
detach_destroy_domains(&cpu_online_map);
+ free_sched_domains();
err = arch_init_sched_domains(&cpu_online_map);
mutex_unlock(&sched_domains_mutex);
put_online_cpus();
@@ -7467,6 +7487,7 @@ static int update_sched_domains(struct notifier_block *nfb,
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
detach_destroy_domains(&cpu_online_map);
+ free_sched_domains();
return NOTIFY_OK;
case CPU_UP_CANCELED:
@@ -7485,8 +7506,16 @@ static int update_sched_domains(struct notifier_block *nfb,
return NOTIFY_DONE;
}
+#ifndef CONFIG_CPUSETS
+ /*
+ * Create default domain partitioning if cpusets are disabled.
+ * Otherwise we let cpusets rebuild the domains based on the
+ * current setup.
+ */
+
/* The hotplug lock is already held by cpu_up/cpu_down */
arch_init_sched_domains(&cpu_online_map);
+#endif
return NOTIFY_OK;
}
@@ -7626,7 +7655,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
else
rt_se->rt_rq = parent->my_q;
- rt_se->rt_rq = &rq->rt;
rt_se->my_q = rt_rq;
rt_se->parent = parent;
INIT_LIST_HEAD(&rt_se->run_list);
@@ -8348,7 +8376,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
#ifdef CONFIG_CGROUP_SCHED
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
- struct task_group *tgi, *parent = tg->parent;
+ struct task_group *tgi, *parent = tg ? tg->parent : NULL;
unsigned long total = 0;
if (!parent) {
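
Several of the sched.c hunks above revolve around the new free_sched_domains() helper: doms_cur either points at the static fallback_doms mask or at a heap allocation made during an earlier rebuild, and only the latter may be freed before the pointer is reset. The user-space sketch below models that ownership rule; cpumask_stub and the calloc() call are simplified stand-ins for the kernel types and allocator, not kernel code.

#include <stdlib.h>

/* Stand-in for cpumask_t: just an opaque blob for the sketch. */
typedef struct { unsigned long bits[4]; } cpumask_stub;

/* One static fallback mask, mirroring the kernel's fallback_doms. */
static cpumask_stub fallback_doms;

static cpumask_stub *doms_cur = &fallback_doms;  /* current domain masks */
static int ndoms_cur;                            /* number of masks      */

/* Mirrors free_sched_domains(): only free heap-allocated mask arrays,
 * never the static fallback, then point back at the fallback. */
static void free_sched_domains(void)
{
	ndoms_cur = 0;
	if (doms_cur != &fallback_doms)
		free(doms_cur);
	doms_cur = &fallback_doms;
}

int main(void)
{
	/* Simulate a rebuild that allocated two domain masks... */
	doms_cur = calloc(2, sizeof(*doms_cur));
	ndoms_cur = 2;

	/* ...and the teardown path added by the patch. */
	free_sched_domains();
	free_sched_domains();  /* safe to call again: the fallback is never freed */
	return 0;
}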
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3432d57..1dad5bb 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -449,13 +449,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
#endif
}
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
struct rt_rq *group_rq = group_rt_rq(rt_se);
- if (group_rq && rt_rq_throttled(group_rq))
+ /*
+ * Don't enqueue the group if it's throttled, or when empty.
+ * The latter is a consequence of the former when a child group
+ * gets throttled and the current group doesn't have any other
+ * active members.
+ */
+ if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
return;
list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +470,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
inc_rt_tasks(rt_se, rt_rq);
}
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +486,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
* Because the prio of an upper entry depends on the lower
* entries, we must remove entries top - down.
*/
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
- struct sched_rt_entity *rt_se, *back = NULL;
+ struct sched_rt_entity *back = NULL;
- rt_se = &p->rt;
for_each_sched_rt_entity(rt_se) {
rt_se->back = back;
back = rt_se;
@@ -492,7 +497,26 @@ static void dequeue_rt_stack(struct task_struct *p)
for (rt_se = back; rt_se; rt_se = rt_se->back) {
if (on_rt_rq(rt_se))
- dequeue_rt_entity(rt_se);
+ __dequeue_rt_entity(rt_se);
+ }
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+ dequeue_rt_stack(rt_se);
+ for_each_sched_rt_entity(rt_se)
+ __enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+ dequeue_rt_stack(rt_se);
+
+ for_each_sched_rt_entity(rt_se) {
+ struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+ if (rt_rq && rt_rq->rt_nr_running)
+ __enqueue_rt_entity(rt_se);
}
}
@@ -506,32 +530,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
if (wakeup)
rt_se->timeout = 0;
- dequeue_rt_stack(p);
-
- /*
- * enqueue everybody, bottom - up.
- */
- for_each_sched_rt_entity(rt_se)
- enqueue_rt_entity(rt_se);
+ enqueue_rt_entity(rt_se);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
struct sched_rt_entity *rt_se = &p->rt;
- struct rt_rq *rt_rq;
update_curr_rt(rq);
-
- dequeue_rt_stack(p);
-
- /*
- * re-enqueue all non-empty rt_rq entities.
- */
- for_each_sched_rt_entity(rt_se) {
- rt_rq = group_rt_rq(rt_se);
- if (rt_rq && rt_rq->rt_nr_running)
- enqueue_rt_entity(rt_se);
- }
+ dequeue_rt_entity(rt_se);
}
/*
@@ -542,8 +549,10 @@ static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
struct rt_prio_array *array = &rt_rq->active;
+ struct list_head *queue = array->queue + rt_se_prio(rt_se);
- list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+ if (on_rt_rq(rt_se))
+ list_move_tail(&rt_se->run_list, queue);
}
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
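
The sched_rt.c rework above hinges on one idiom: dequeue_rt_stack() walks from a leaf entity up to the root recording back pointers, then processes the recorded path top-down, and the new enqueue_rt_entity()/dequeue_rt_entity() re-walk the hierarchy bottom-up afterwards. A toy user-space model of the top-down walk is sketched below; struct entity and walk_top_down() are invented names for illustration only, not the kernel API.

#include <stdio.h>

/* Toy stand-in for sched_rt_entity: a parent link, a back link and a name. */
struct entity {
	struct entity *parent;
	struct entity *back;
	const char *name;
};

/* Walk from the leaf up to the root, remembering the path via ->back,
 * then process the path top-down -- the same idiom dequeue_rt_stack() uses
 * so that upper entries are dequeued before the entries below them. */
static void walk_top_down(struct entity *se, void (*fn)(struct entity *))
{
	struct entity *back = NULL;

	for (; se; se = se->parent) {
		se->back = back;
		back = se;
	}
	for (se = back; se; se = se->back)
		fn(se);
}

static void show(struct entity *se)
{
	printf("visiting %s\n", se->name);
}

int main(void)
{
	struct entity root  = { .name = "root"  };
	struct entity group = { .parent = &root,  .name = "group" };
	struct entity task  = { .parent = &group, .name = "task"  };

	walk_top_down(&task, show);  /* prints root, group, task */
	return 0;
}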
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a38878e..80179ef 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -198,6 +198,9 @@ static inline void sched_info_queued(struct task_struct *t)
/*
* Called when a process ceases being the active-running process, either
* voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Also, if the process is still in the TASK_RUNNING state, call
+ * sched_info_queued() to mark that it has now again started waiting on
+ * the runqueue.
*/
static inline void sched_info_depart(struct task_struct *t)
{
@@ -206,6 +209,9 @@ static inline void sched_info_depart(struct task_struct *t)
t->sched_info.cpu_time += delta;
rq_sched_info_depart(task_rq(t), delta);
+
+ if (t->state == TASK_RUNNING)
+ sched_info_queued(t);
}
/*
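
The sched_stats.h hunk adds one rule to sched_info_depart(): if the departing task is still TASK_RUNNING (it was preempted rather than put to sleep), its wait clock is restarted immediately via sched_info_queued(), so the time it spends queued before running again is charged as run delay. The simplified model below shows the effect; the struct and helper names are stand-ins, not the kernel's schedstats code.

#include <stdio.h>

/* Simplified per-task schedstats: when the task last started running or
 * waiting, plus the accumulated totals. */
struct task_stats {
	unsigned long long last_arrival;  /* went on CPU   */
	unsigned long long last_queued;   /* went on queue */
	unsigned long long cpu_time;
	unsigned long long run_delay;
	int running;                      /* 1 == still wants the CPU */
};

static void info_queued(struct task_stats *t, unsigned long long now)
{
	t->last_queued = now;
}

static void info_arrive(struct task_stats *t, unsigned long long now)
{
	t->run_delay += now - t->last_queued;
	t->last_arrival = now;
}

/* Mirrors the patched sched_info_depart(): charge the CPU time and, if the
 * task is still runnable (preempted rather than sleeping), restart its wait
 * clock right away so the next wait interval is accounted for. */
static void info_depart(struct task_stats *t, unsigned long long now)
{
	t->cpu_time += now - t->last_arrival;
	if (t->running)
		info_queued(t, now);
}

int main(void)
{
	struct task_stats t = { .running = 1 };

	info_queued(&t, 0);
	info_arrive(&t, 5);    /* waited 5 ticks                                  */
	info_depart(&t, 15);   /* ran 10, preempted while still runnable          */
	info_arrive(&t, 20);   /* waited 5 more -- the interval the fix charges   */

	printf("cpu_time=%llu run_delay=%llu\n", t.cpu_time, t.run_delay);
	return 0;
}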
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 01b6522..c828c23 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -49,12 +49,17 @@ static unsigned long get_timestamp(int this_cpu)
return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
-void touch_softlockup_watchdog(void)
+static void __touch_softlockup_watchdog(void)
{
int this_cpu = raw_smp_processor_id();
__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}
+
+void touch_softlockup_watchdog(void)
+{
+ __raw_get_cpu_var(touch_timestamp) = 0;
+}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
@@ -80,7 +85,7 @@ void softlockup_tick(void)
unsigned long now;
if (touch_timestamp == 0) {
- touch_softlockup_watchdog();
+ __touch_softlockup_watchdog();
return;
}
@@ -95,7 +100,7 @@ void softlockup_tick(void)
/* do not print during early bootup: */
if (unlikely(system_state != SYSTEM_RUNNING)) {
- touch_softlockup_watchdog();
+ __touch_softlockup_watchdog();
return;
}
@@ -214,7 +219,7 @@ static int watchdog(void *__bind_cpu)
sched_setscheduler(current, SCHED_FIFO, &param);
/* initialize timestamp */
- touch_softlockup_watchdog();
+ __touch_softlockup_watchdog();
set_current_state(TASK_INTERRUPTIBLE);
/*
@@ -223,7 +228,7 @@ static int watchdog(void *__bind_cpu)
* debug-printout triggers in softlockup_tick().
*/
while (!kthread_should_stop()) {
- touch_softlockup_watchdog();
+ __touch_softlockup_watchdog();
schedule();
if (kthread_should_stop())
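
The softlockup.c hunks split the watchdog touch path in two: the exported touch_softlockup_watchdog() now only zeroes the per-CPU touch timestamp, and the next softlockup_tick() notices the zero and re-reads the clock itself through the internal __touch_softlockup_watchdog(). Below is a single-CPU user-space model of that protocol; get_timestamp() and the 60-second threshold are illustrative stand-ins rather than the kernel's actual implementation.

#include <stdio.h>

static unsigned long touch_timestamp;  /* 0 means "resync on next tick"  */
static unsigned long fake_clock;       /* stand-in for cpu_clock() >> 30 */

static unsigned long get_timestamp(void)
{
	return fake_clock;
}

/* Internal helper: record "the watchdog was fed right now". */
static void __touch_watchdog(void)
{
	touch_timestamp = get_timestamp();
}

/* Exported helper after the patch: only mark the timestamp as stale and let
 * the next tick pick up a fresh value on the right CPU. */
static void touch_watchdog(void)
{
	touch_timestamp = 0;
}

/* Simplified softlockup_tick(): resync if touched, otherwise warn when the
 * CPU has gone longer than an (illustrative) 60s threshold without feeding. */
static void watchdog_tick(void)
{
	unsigned long now = get_timestamp();

	if (touch_timestamp == 0) {
		__touch_watchdog();
		return;
	}
	if (now > touch_timestamp + 60)
		printf("BUG: soft lockup - %lus without feeding\n",
		       now - touch_timestamp);
}

int main(void)
{
	fake_clock = 1;   __touch_watchdog(); /* watchdog thread starts up     */
	fake_clock = 30;  watchdog_tick();    /* fine: 29s since last touch    */
	touch_watchdog();                     /* external touch, e.g. resume   */
	fake_clock = 200; watchdog_tick();    /* resyncs instead of warning    */
	fake_clock = 270; watchdog_tick();    /* 70s since resync -> warns     */
	return 0;
}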