Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/freezer.c           |  7
 -rw-r--r--  kernel/irq/internals.h     |  3
 -rw-r--r--  kernel/irq/manage.c        | 50
 -rw-r--r--  kernel/irq/migration.c     |  2
 -rw-r--r--  kernel/perf_counter.c      |  1
 -rw-r--r--  kernel/sched.c             |  4
 -rw-r--r--  kernel/sched_fair.c        | 10
 -rw-r--r--  kernel/time/clocksource.c  |  2
 -rw-r--r--  kernel/timer.c             |  2
 9 files changed, 66 insertions(+), 15 deletions(-)
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 2f4936c..bd1d42b 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -44,12 +44,19 @@ void refrigerator(void)
recalc_sigpending(); /* We sent fake signal, clean it up */
spin_unlock_irq(&current->sighand->siglock);
+ /* prevent accounting of that task to load */
+ current->flags |= PF_FREEZING;
+
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!frozen(current))
break;
schedule();
}
+
+ /* Remove the accounting blocker */
+ current->flags &= ~PF_FREEZING;
+
pr_debug("%s left refrigerator\n", current->comm);
__set_current_state(save);
}
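
A task parked in the refrigerator sleeps in TASK_UNINTERRUPTIBLE, which the scheduler normally counts toward the load average, so freezing many tasks (e.g. during suspend) would inflate loadavg. The PF_FREEZING flag set around the freeze loop gives the load-accounting side a way to exclude such tasks. That consumer lives outside kernel/ and is not part of this diffstat; a plausible sketch of it, assuming a task_contributes_to_load()-style helper in include/linux/sched.h, would be:

	/*
	 * Sketch only, not part of this diff: count a task into
	 * nr_uninterruptible/loadavg only if it is in TASK_UNINTERRUPTIBLE
	 * *and* is not merely sitting in the refrigerator (PF_FREEZING set
	 * by refrigerator() above).
	 */
	#define task_contributes_to_load(task)				\
		(((task)->state & TASK_UNINTERRUPTIBLE) != 0 &&		\
		 ((task)->flags & PF_FREEZING) == 0)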
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 7346825..e70ed55 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -42,8 +42,7 @@ static inline void unregister_handler_proc(unsigned int irq,
extern int irq_select_affinity_usr(unsigned int irq);
-extern void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
+extern void irq_set_thread_affinity(struct irq_desc *desc);
/*
* Debugging printout:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 50da676..f0de36f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq)
return 1;
}
-void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ * irq_set_thread_affinity - Notify irq threads to adjust affinity
+ * @desc: irq descriptor whose affinity changed
+ *
+ * We just set IRQTF_AFFINITY and delegate the affinity setting
+ * to the interrupt thread itself. We can not call
+ * set_cpus_allowed_ptr() here as we hold desc->lock and this
+ * code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
{
struct irqaction *action = desc->action;
while (action) {
if (action->thread)
- set_cpus_allowed_ptr(action->thread, cpumask);
+ set_bit(IRQTF_AFFINITY, &action->thread_flags);
action = action->next;
}
}
@@ -112,7 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
if (desc->status & IRQ_MOVE_PCNTXT) {
if (!desc->chip->set_affinity(irq, cpumask)) {
cpumask_copy(desc->affinity, cpumask);
- irq_set_thread_affinity(desc, cpumask);
+ irq_set_thread_affinity(desc);
}
}
else {
@@ -122,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
#else
if (!desc->chip->set_affinity(irq, cpumask)) {
cpumask_copy(desc->affinity, cpumask);
- irq_set_thread_affinity(desc, cpumask);
+ irq_set_thread_affinity(desc);
}
#endif
desc->status |= IRQ_AFFINITY_SET;
@@ -176,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq)
spin_lock_irqsave(&desc->lock, flags);
ret = setup_affinity(irq, desc);
if (!ret)
- irq_set_thread_affinity(desc, desc->affinity);
+ irq_set_thread_affinity(desc);
spin_unlock_irqrestore(&desc->lock, flags);
return ret;
@@ -444,6 +452,34 @@ static int irq_wait_for_interrupt(struct irqaction *action)
}
/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+ cpumask_var_t mask;
+
+ if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+ return;
+
+ /*
+ * In case we are out of memory we set IRQTF_AFFINITY again and
+ * try again next time
+ */
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ set_bit(IRQTF_AFFINITY, &action->thread_flags);
+ return;
+ }
+
+ spin_lock_irq(&desc->lock);
+ cpumask_copy(mask, desc->affinity);
+ spin_unlock_irq(&desc->lock);
+
+ set_cpus_allowed_ptr(current, mask);
+ free_cpumask_var(mask);
+}
+
+/*
* Interrupt handler thread
*/
static int irq_thread(void *data)
@@ -458,6 +494,8 @@ static int irq_thread(void *data)
while (!irq_wait_for_interrupt(action)) {
+ irq_thread_check_affinity(desc, action);
+
atomic_inc(&desc->threads_active);
spin_lock_irq(&desc->lock);
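
With the delegation in place, nothing changes for users of the affinity API: irq_set_affinity() still takes desc->lock itself, but via irq_set_thread_affinity() it now only sets IRQTF_AFFINITY; the handler thread calls set_cpus_allowed_ptr() on itself the next time it wakes, in irq_thread_check_affinity() above. A driver-side illustration (not part of the patch; all "demo" names are made up, the interfaces are the ordinary request_threaded_irq()/irq_set_affinity()/cpumask_of() ones):

	#include <linux/interrupt.h>
	#include <linux/cpumask.h>

	static irqreturn_t demo_hardirq(int irq, void *dev_id)
	{
		/* hard irq context: acknowledge the device and defer */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t demo_thread_fn(int irq, void *dev_id)
	{
		/*
		 * Runs in the irq thread; after this patch the thread keeps
		 * itself on the CPUs in desc->affinity via
		 * irq_thread_check_affinity().
		 */
		return IRQ_HANDLED;
	}

	static int demo_setup(void *dev, unsigned int irq)
	{
		int err = request_threaded_irq(irq, demo_hardirq,
					       demo_thread_fn, 0, "demo", dev);
		if (err)
			return err;

		/*
		 * Only flags the thread (under desc->lock); the actual
		 * set_cpus_allowed_ptr() happens at its next wakeup.
		 */
		return irq_set_affinity(irq, cpumask_of(2));
	}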
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index cfe767c..fcb6c96 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -45,7 +45,7 @@ void move_masked_irq(int irq)
< nr_cpu_ids))
if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
cpumask_copy(desc->affinity, desc->pending_mask);
- irq_set_thread_affinity(desc, desc->pending_mask);
+ irq_set_thread_affinity(desc);
}
cpumask_clear(desc->pending_mask);
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a641eb7..7bc888d 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2665,6 +2665,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
header.size += sizeof(cpu_entry);
cpu_entry.cpu = raw_smp_processor_id();
+ cpu_entry.reserved = 0;
}
if (sample_type & PERF_SAMPLE_PERIOD)
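
The added assignment zeroes the reserved member so the sample record copied out to the mmap buffer cannot carry whatever happened to be on the kernel stack. An equivalent alternative, sketched here on the assumption that cpu_entry is the small on-stack struct with u32 cpu/reserved members seen above, would be to zero the whole record at declaration:

	/* Sketch of an equivalent fix: zero-initialize the on-stack record
	 * so reserved/padding bytes can never leak stack contents. */
	struct {
		u32 cpu, reserved;
	} cpu_entry = { 0, 0 };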
diff --git a/kernel/sched.c b/kernel/sched.c
index 98972d3..1b59e26 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7289,6 +7289,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
static void calc_global_load_remove(struct rq *rq)
{
atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+ rq->calc_load_active = 0;
}
#endif /* CONFIG_HOTPLUG_CPU */
@@ -7515,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
task_rq_unlock(rq, &flags);
get_task_struct(p);
cpu_rq(cpu)->migration_thread = p;
+ rq->calc_load_update = calc_load_update;
break;
case CPU_ONLINE:
@@ -7525,8 +7527,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/* Update our root-domain */
rq = cpu_rq(cpu);
spin_lock_irqsave(&rq->lock, flags);
- rq->calc_load_update = calc_load_update;
- rq->calc_load_active = 0;
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7c248dc..9ffb2b2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
return min_vruntime;
}
+static inline int entity_before(struct sched_entity *a,
+ struct sched_entity *b)
+{
+ return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return se->vruntime - cfs_rq->min_vruntime;
@@ -1017,7 +1023,7 @@ static void yield_task_fair(struct rq *rq)
/*
* Already in the rightmost position?
*/
- if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+ if (unlikely(!rightmost || entity_before(rightmost, se)))
return;
/*
@@ -1713,7 +1719,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
/* 'curr' will be NULL if the child belongs to a different group */
if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
- curr && curr->vruntime < se->vruntime) {
+ curr && entity_before(curr, se)) {
/*
* Upon rescheduling, sched_class::put_prev_task() will place
* 'current' within the tree based on its new key value.
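
The new entity_before() helper replaces raw "<" comparisons of vruntime. vruntime is a 64-bit value that only ever grows and may eventually wrap; casting the difference to s64 and testing its sign orders two values correctly as long as they are within 2^63 of each other, which a plain unsigned comparison gets wrong across the wrap point (min_vruntime() just above already uses the same idiom). A standalone userspace sketch of the difference, my own and not kernel code:

	#include <stdio.h>
	#include <stdint.h>

	/* same idiom as entity_before(): wraparound-safe ordering */
	static int before(uint64_t a, uint64_t b)
	{
		return (int64_t)(a - b) < 0;
	}

	int main(void)
	{
		uint64_t a = UINT64_MAX - 5;	/* just before the wrap */
		uint64_t b = 10;		/* has wrapped past a */

		printf("raw a < b   : %d\n", a < b);		/* 0: wrong near the wrap */
		printf("before(a,b) : %d\n", before(a, b));	/* 1: a is still behind b */
		return 0;
	}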
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 592bf58..7466cb8 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -513,7 +513,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
* Check to make sure we don't switch to a non-highres capable
* clocksource if the tick code is in oneshot mode (highres or nohz)
*/
- if (tick_oneshot_mode_active() &&
+ if (tick_oneshot_mode_active() && ovr &&
!(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) {
printk(KERN_WARNING "%s clocksource is not HRT compatible. "
"Cannot switch while in HRT/NOHZ mode\n", ovr->name);
diff --git a/kernel/timer.c b/kernel/timer.c
index 0b36b9e..a7f07d5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -714,7 +714,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
* networking code - if the timer is re-modified
* to be the same thing then just return:
*/
- if (timer->expires == expires && timer_pending(timer))
+ if (timer_pending(timer) && timer->expires == expires)
return 1;
return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
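
Swapping the operands means ->expires is only read once timer_pending() has confirmed the timer is actually queued; for a timer that has merely been initialized and never armed, ->expires may still hold uninitialized data. A sketch of the caller pattern the reordering protects (all "demo" names are made up):

	#include <linux/timer.h>
	#include <linux/slab.h>
	#include <linux/jiffies.h>

	struct demo_dev {
		struct timer_list timer;
	};

	static void demo_timeout(unsigned long data)
	{
		/* nothing to do for the illustration */
	}

	static struct demo_dev *demo_create(void)
	{
		struct demo_dev *d = kmalloc(sizeof(*d), GFP_KERNEL);

		if (!d)
			return NULL;

		init_timer(&d->timer);
		d->timer.function = demo_timeout;
		d->timer.data = (unsigned long)d;
		/* ->expires is never written: kmalloc() memory is not zeroed */

		/* old order compared the uninitialized ->expires before the
		 * timer_pending() check; new order short-circuits first */
		mod_timer(&d->timer, jiffies + HZ);
		return d;
	}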