author		Ingo Molnar <mingo@elte.hu>	2009-01-18 18:15:49 +0100
committer	Ingo Molnar <mingo@elte.hu>	2009-01-18 18:15:49 +0100
commit		af37501c792107c2bde1524bdae38d9a247b841a (patch)
tree		b50ee90d29e72956b8b7d8d19677fe5996755d49 /kernel
parent		d859e29fe34cb833071b20aef860ee94fbad9bb2 (diff)
parent		99937d6455cea95405ac681c86a857d0fcd530bd (diff)
Merge branch 'core/percpu' into perfcounters/core
Conflicts:
	arch/x86/include/asm/pda.h

We merge tip/core/percpu into tip/perfcounters/core because of a semantic and contextual conflict: the former eliminates the PDA, while the latter extends it with the apic_perf_irqs field.

Resolve the conflict by moving the new field to the irq_cpustat structure on 64-bit too.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
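For reference, a minimal sketch of the shape of the resolution described above: with the PDA eliminated, the counter lives in the per-CPU irq_cpustat structure on 64-bit x86 as well. The header location and surrounding fields are assumptions inferred from the commit message, not part of the kernel/ diff below:

	/* sketch: arch/x86 irq_cpustat_t after the resolution (assumed layout) */
	typedef struct {
		unsigned int __softirq_pending;
		unsigned int apic_timer_irqs;	/* arch dependent */
		unsigned int apic_perf_irqs;	/* moved here from the removed PDA */
	} ____cacheline_aligned irq_cpustat_t;

	DECLARE_PER_CPU(irq_cpustat_t, irq_stat);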
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile            |   6
-rw-r--r--  kernel/async.c             |  21
-rw-r--r--  kernel/irq/chip.c          |   5
-rw-r--r--  kernel/irq/handle.c        |  57
-rw-r--r--  kernel/irq/internals.h     |   7
-rw-r--r--  kernel/irq/manage.c        |  12
-rw-r--r--  kernel/irq/migration.c     |  12
-rw-r--r--  kernel/irq/numa_migrate.c  |  19
-rw-r--r--  kernel/irq/proc.c          |   4
-rw-r--r--  kernel/rcutorture.c        | 113
-rw-r--r--  kernel/sched.c             |  13
-rw-r--r--  kernel/sched_debug.c       |  21
-rw-r--r--  kernel/sched_rt.c          |  36
-rw-r--r--  kernel/softirq.c           |   5
-rw-r--r--  kernel/up.c                |  21
15 files changed, 238 insertions(+), 114 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 8b2628c..e411592 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -40,7 +40,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+ifeq ($(CONFIG_USE_GENERIC_SMP_HELPERS),y)
+obj-y += smp.o
+else
+obj-y += up.o
+endif
obj-$(CONFIG_SMP) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
diff --git a/kernel/async.c b/kernel/async.c
index f286e9f..608b32b 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -90,12 +90,12 @@ extern int initcall_debug;
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
struct async_entry *entry;
- if (!list_empty(&async_pending)) {
- entry = list_first_entry(&async_pending,
+ if (!list_empty(running)) {
+ entry = list_first_entry(running,
struct async_entry, list);
return entry->cookie;
- } else if (!list_empty(running)) {
- entry = list_first_entry(running,
+ } else if (!list_empty(&async_pending)) {
+ entry = list_first_entry(&async_pending,
struct async_entry, list);
return entry->cookie;
} else {
@@ -104,6 +104,17 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
}
}
+
+static async_cookie_t lowest_in_progress(struct list_head *running)
+{
+ unsigned long flags;
+ async_cookie_t ret;
+
+ spin_lock_irqsave(&async_lock, flags);
+ ret = __lowest_in_progress(running);
+ spin_unlock_irqrestore(&async_lock, flags);
+ return ret;
+}
/*
* pick the first pending entry and run it
*/
@@ -229,7 +240,7 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r
starttime = ktime_get();
}
- wait_event(async_done, __lowest_in_progress(running) >= cookie);
+ wait_event(async_done, lowest_in_progress(running) >= cookie);
if (initcall_debug && system_state == SYSTEM_BOOTING) {
endtime = ktime_get();
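Two fixes land in kernel/async.c above: __lowest_in_progress() now checks the running list before the global async_pending list (entries migrate from pending to running, so the running list holds the older, lower cookies), and a locking wrapper is added so the check can be made safely from wait_event(). The __helper/helper split mirrors a common kernel convention; a standalone sketch with hypothetical names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	/* __example_empty: caller must hold example_lock */
	static bool __example_empty(struct list_head *list)
	{
		return list_empty(list);
	}

	/* example_empty: takes the lock itself, callable without the lock held */
	static bool example_empty(struct list_head *list)
	{
		unsigned long flags;
		bool ret;

		spin_lock_irqsave(&example_lock, flags);
		ret = __example_empty(list);
		spin_unlock_irqrestore(&example_lock, flags);
		return ret;
	}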
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f63c706..c248eba 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
desc->irq_count = 0;
desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
- cpumask_setall(&desc->affinity);
+ cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ cpumask_clear(desc->pending_mask);
+#endif
#endif
spin_unlock_irqrestore(&desc->lock, flags);
}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c20db0b..375d68c 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
+#include <linux/bootmem.h>
#include "internals.h"
@@ -57,6 +58,7 @@ int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
#ifdef CONFIG_SPARSE_IRQ
+
static struct irq_desc irq_desc_init = {
.irq = -1,
.status = IRQ_DISABLED,
@@ -64,9 +66,6 @@ static struct irq_desc irq_desc_init = {
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
};
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -101,6 +100,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
printk(KERN_ERR "can not alloc kstat_irqs\n");
BUG_ON(1);
}
+ if (!init_alloc_desc_masks(desc, cpu, false)) {
+ printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+ BUG_ON(1);
+ }
arch_init_chip_data(desc, cpu);
}
@@ -109,7 +112,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
*/
DEFINE_SPINLOCK(sparse_irq_lock);
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS_LEGACY-1] = {
@@ -119,14 +122,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
}
};
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
int __init early_irq_init(void)
{
@@ -134,18 +133,30 @@ int __init early_irq_init(void)
int legacy_count;
int i;
+ /* initialize nr_irqs based on nr_cpu_ids */
+ arch_probe_nr_irqs();
+ printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
desc = irq_desc_legacy;
legacy_count = ARRAY_SIZE(irq_desc_legacy);
+ /* allocate irq_desc_ptrs array based on nr_irqs */
+ irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+ /* allocate based on nr_cpu_ids */
+ /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+ kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+ sizeof(int));
+
for (i = 0; i < legacy_count; i++) {
desc[i].irq = i;
- desc[i].kstat_irqs = kstat_irqs_legacy[i];
+ desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+ init_alloc_desc_masks(&desc[i], 0, true);
irq_desc_ptrs[i] = desc + i;
}
- for (i = legacy_count; i < NR_IRQS; i++)
+ for (i = legacy_count; i < nr_irqs; i++)
irq_desc_ptrs[i] = NULL;
return arch_early_irq_init();
@@ -153,7 +164,10 @@ int __init early_irq_init(void)
struct irq_desc *irq_to_desc(unsigned int irq)
{
- return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+ if (irq_desc_ptrs && irq < nr_irqs)
+ return irq_desc_ptrs[irq];
+
+ return NULL;
}
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -162,10 +176,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
unsigned long flags;
int node;
- if (irq >= NR_IRQS) {
- printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
- irq, NR_IRQS);
- WARN_ON(1);
+ if (irq >= nr_irqs) {
+ WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+ irq, nr_irqs);
return NULL;
}
@@ -207,9 +220,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
}
};
@@ -219,12 +229,15 @@ int __init early_irq_init(void)
int count;
int i;
+ printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
desc = irq_desc;
count = ARRAY_SIZE(irq_desc);
- for (i = 0; i < count; i++)
+ for (i = 0; i < count; i++) {
desc[i].irq = i;
-
+ init_alloc_desc_masks(&desc[i], 0, true);
+ }
return arch_early_irq_init();
}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e6d0a43..40416a8 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -16,7 +16,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd0cd8d..b98739a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -98,14 +98,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
- cpumask_copy(&desc->affinity, cpumask);
+ cpumask_copy(desc->affinity, cpumask);
desc->chip->set_affinity(irq, cpumask);
} else {
desc->status |= IRQ_MOVE_PENDING;
- cpumask_copy(&desc->pending_mask, cpumask);
+ cpumask_copy(desc->pending_mask, cpumask);
}
#else
- cpumask_copy(&desc->affinity, cpumask);
+ cpumask_copy(desc->affinity, cpumask);
desc->chip->set_affinity(irq, cpumask);
#endif
desc->status |= IRQ_AFFINITY_SET;
@@ -127,16 +127,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
* one of the targets is online.
*/
if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
- if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+ if (cpumask_any_and(desc->affinity, cpu_online_mask)
< nr_cpu_ids)
goto set_affinity;
else
desc->status &= ~IRQ_AFFINITY_SET;
}
- cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+ cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
- desc->chip->set_affinity(irq, &desc->affinity);
+ desc->chip->set_affinity(irq, desc->affinity);
return 0;
}
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bd72329..e05ad9b 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
desc->status &= ~IRQ_MOVE_PENDING;
- if (unlikely(cpumask_empty(&desc->pending_mask)))
+ if (unlikely(cpumask_empty(desc->pending_mask)))
return;
if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
* For correct operation this depends on the caller
* masking the irqs.
*/
- if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+ if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
< nr_cpu_ids)) {
- cpumask_and(&desc->affinity,
- &desc->pending_mask, cpu_online_mask);
- desc->chip->set_affinity(irq, &desc->affinity);
+ cpumask_and(desc->affinity,
+ desc->pending_mask, cpu_online_mask);
+ desc->chip->set_affinity(irq, desc->affinity);
}
- cpumask_clear(&desc->pending_mask);
+ cpumask_clear(desc->pending_mask);
}
void move_native_irq(int irq)
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index ecf765c..666260e 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -38,15 +38,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
old_desc->kstat_irqs = NULL;
}
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
struct irq_desc *desc, int cpu)
{
memcpy(desc, old_desc, sizeof(struct irq_desc));
+ if (!init_alloc_desc_masks(desc, cpu, false)) {
+ printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+ "for migration.\n", irq);
+ return false;
+ }
spin_lock_init(&desc->lock);
desc->cpu = cpu;
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+ init_copy_desc_masks(old_desc, desc);
arch_init_copy_chip_data(old_desc, desc, cpu);
+ return true;
}
static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +83,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
node = cpu_to_node(cpu);
desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
if (!desc) {
- printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+ printk(KERN_ERR "irq %d: can not get new irq_desc "
+ "for migration.\n", irq);
+ /* still use old one */
+ desc = old_desc;
+ goto out_unlock;
+ }
+ if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
/* still use old one */
+ kfree(desc);
desc = old_desc;
goto out_unlock;
}
- init_copy_one_irq_desc(irq, old_desc, desc, cpu);
irq_desc_ptrs[irq] = desc;
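The numa_migrate.c change makes init_copy_one_irq_desc() fallible and has the caller fall back to the old descriptor, rather than proceeding with a half-initialized copy, when the cpumask allocation fails. The shape of that fallback, as a minimal sketch with hypothetical names:

	#include <linux/slab.h>

	struct example_desc { int data; };

	static bool example_init_copy(struct example_desc *old, struct example_desc *new)
	{
		*new = *old;		/* stand-in for the field copy + allocations */
		return true;		/* would return false on allocation failure */
	}

	static struct example_desc *example_move(struct example_desc *old, int node)
	{
		struct example_desc *desc;

		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
		if (!desc || !example_init_copy(old, desc)) {
			kfree(desc);	/* kfree(NULL) is a no-op */
			return old;	/* keep using the old descriptor */
		}
		return desc;
	}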
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index aae3f74..692363d 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long)m->private);
- const struct cpumask *mask = &desc->affinity;
+ const struct cpumask *mask = desc->affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PENDING)
- mask = &desc->pending_mask;
+ mask = desc->pending_mask;
#endif
seq_cpumask(m, mask);
seq_putc(m, '\n');
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 1cff28d..7c4142a 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -136,29 +136,47 @@ static int stutter_pause_test = 0;
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
-#define FULLSTOP_SHUTDOWN 1 /* Bail due to system shutdown/panic. */
-#define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */
-static int fullstop; /* stop generating callbacks at test end. */
-DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */
- /* spawning of kthreads. */
+/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
+
+#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
+#define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */
+#define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */
+static int fullstop = FULLSTOP_RMMOD;
+DEFINE_MUTEX(fullstop_mutex); /* Protect fullstop transitions and spawning */
+ /* of kthreads. */
/*
- * Detect and respond to a signal-based shutdown.
+ * Detect and respond to a system shutdown.
*/
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
unsigned long unused2, void *unused3)
{
- if (fullstop)
- return NOTIFY_DONE;
mutex_lock(&fullstop_mutex);
- if (!fullstop)
+ if (fullstop == FULLSTOP_DONTSTOP)
fullstop = FULLSTOP_SHUTDOWN;
+ else
+ printk(KERN_WARNING /* but going down anyway, so... */
+ "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
mutex_unlock(&fullstop_mutex);
return NOTIFY_DONE;
}
/*
+ * Absorb kthreads into a kernel function that won't return, so that
+ * they won't ever access module text or data again.
+ */
+static void rcutorture_shutdown_absorb(char *title)
+{
+ if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+ printk(KERN_NOTICE
+ "rcutorture thread %s parking due to system shutdown\n",
+ title);
+ schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
+ }
+}
+
+/*
* Allocate an element from the rcu_tortures pool.
*/
static struct rcu_torture *
@@ -219,13 +237,14 @@ rcu_random(struct rcu_random_state *rrsp)
}
static void
-rcu_stutter_wait(void)
+rcu_stutter_wait(char *title)
{
- while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
+ while (stutter_pause_test || !rcutorture_runnable) {
if (rcutorture_runnable)
schedule_timeout_interruptible(1);
else
schedule_timeout_interruptible(round_jiffies_relative(HZ));
+ rcutorture_shutdown_absorb(title);
}
}
@@ -287,7 +306,7 @@ rcu_torture_cb(struct rcu_head *p)
int i;
struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
- if (fullstop) {
+ if (fullstop != FULLSTOP_DONTSTOP) {
/* Test is ending, just drop callbacks on the floor. */
/* The next initialization will pick up the pieces. */
return;
@@ -619,10 +638,11 @@ rcu_torture_writer(void *arg)
}
rcu_torture_current_version++;
oldbatch = cur_ops->completed();
- rcu_stutter_wait();
- } while (!kthread_should_stop() && !fullstop);
+ rcu_stutter_wait("rcu_torture_writer");
+ } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
- while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+ rcutorture_shutdown_absorb("rcu_torture_writer");
+ while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -643,11 +663,12 @@ rcu_torture_fakewriter(void *arg)
schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
udelay(rcu_random(&rand) & 0x3ff);
cur_ops->sync();
- rcu_stutter_wait();
- } while (!kthread_should_stop() && !fullstop);
+ rcu_stutter_wait("rcu_torture_fakewriter");
+ } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
- while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+ rcutorture_shutdown_absorb("rcu_torture_fakewriter");
+ while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -752,12 +773,13 @@ rcu_torture_reader(void *arg)
preempt_enable();
cur_ops->readunlock(idx);
schedule();
- rcu_stutter_wait();
- } while (!kthread_should_stop() && !fullstop);
+ rcu_stutter_wait("rcu_torture_reader");
+ } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
+ rcutorture_shutdown_absorb("rcu_torture_reader");
if (irqreader && cur_ops->irqcapable)
del_timer_sync(&t);
- while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+ while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -854,7 +876,8 @@ rcu_torture_stats(void *arg)
do {
schedule_timeout_interruptible(stat_interval * HZ);
rcu_torture_stats_print();
- } while (!kthread_should_stop() && !fullstop);
+ rcutorture_shutdown_absorb("rcu_torture_stats");
+ } while (!kthread_should_stop());
VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
return 0;
}
@@ -866,52 +889,49 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
*/
static void rcu_torture_shuffle_tasks(void)
{
- cpumask_var_t tmp_mask;
+ cpumask_t tmp_mask;
int i;
- if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
- BUG();
-
- cpumask_setall(tmp_mask);
+ cpus_setall(tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
- if (num_online_cpus() == 1)
- goto out;
+ if (num_online_cpus() == 1) {
+ put_online_cpus();
+ return;
+ }
if (rcu_idle_cpu != -1)
- cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
+ cpu_clear(rcu_idle_cpu, tmp_mask);
- set_cpus_allowed_ptr(current, tmp_mask);
+ set_cpus_allowed_ptr(current, &tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
set_cpus_allowed_ptr(reader_tasks[i],
- tmp_mask);
+ &tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
set_cpus_allowed_ptr(fakewriter_tasks[i],
- tmp_mask);
+ &tmp_mask);
}
if (writer_task)
- set_cpus_allowed_ptr(writer_task, tmp_mask);
+ set_cpus_allowed_ptr(writer_task, &tmp_mask);
if (stats_task)
- set_cpus_allowed_ptr(stats_task, tmp_mask);
+ set_cpus_allowed_ptr(stats_task, &tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
else
rcu_idle_cpu--;
-out:
put_online_cpus();
- free_cpumask_var(tmp_mask);
}
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
@@ -925,7 +945,8 @@ rcu_torture_shuffle(void *arg)
do {
schedule_timeout_interruptible(shuffle_interval * HZ);
rcu_torture_shuffle_tasks();
- } while (!kthread_should_stop() && !fullstop);
+ rcutorture_shutdown_absorb("rcu_torture_shuffle");
+ } while (!kthread_should_stop());
VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
return 0;
}
@@ -940,10 +961,11 @@ rcu_torture_stutter(void *arg)
do {
schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 1;
- if (!kthread_should_stop() && !fullstop)
+ if (!kthread_should_stop())
schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 0;
- } while (!kthread_should_stop() && !fullstop);
+ rcutorture_shutdown_absorb("rcu_torture_stutter");
+ } while (!kthread_should_stop());
VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
return 0;
}
@@ -970,15 +992,16 @@ rcu_torture_cleanup(void)
int i;
mutex_lock(&fullstop_mutex);
- if (!fullstop) {
- /* If being signaled, let it happen, then exit. */
+ if (fullstop == FULLSTOP_SHUTDOWN) {
+ printk(KERN_WARNING /* but going down anyway, so... */
+ "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
mutex_unlock(&fullstop_mutex);
- schedule_timeout_interruptible(10 * HZ);
+ schedule_timeout_uninterruptible(10);
if (cur_ops->cb_barrier != NULL)
cur_ops->cb_barrier();
return;
}
- fullstop = FULLSTOP_CLEANUP;
+ fullstop = FULLSTOP_RMMOD;
mutex_unlock(&fullstop_mutex);
unregister_reboot_notifier(&rcutorture_nb);
if (stutter_task) {
@@ -1078,7 +1101,7 @@ rcu_torture_init(void)
else
nrealreaders = 2 * num_online_cpus();
rcu_torture_print_module_parms("Start of test");
- fullstop = 0;
+ fullstop = FULLSTOP_DONTSTOP;
/* Set up the freelist. */
diff --git a/kernel/sched.c b/kernel/sched.c
index 43fd212..ce9feca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch);
DEFINE_TRACE(sched_migrate_task);
#ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
/*
* Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
* Since cpu_power is a 'constant', we can use a reciprocal divide.
@@ -7352,10 +7355,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
* groups, so roll our own. Now each node has its own list of groups which
* gets dynamically allocated.
*/
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
static struct sched_group ***sched_group_nodes_bycpu;
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
@@ -7630,7 +7633,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
#ifdef CONFIG_NUMA
if (cpumask_weight(cpu_map) >
SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
- sd = &per_cpu(allnodes_domains, i);
+ sd = &per_cpu(allnodes_domains, i).sd;
SD_INIT(sd, ALLNODES);
set_domain_attribute(sd, attr);
cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7640,7 +7643,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
} else
p = NULL;
- sd = &per_cpu(node_domains, i);
+ sd = &per_cpu(node_domains, i).sd;
SD_INIT(sd, NODE);
set_domain_attribute(sd, attr);
sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7758,7 +7761,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
for_each_cpu(j, nodemask) {
struct sched_domain *sd;
- sd = &per_cpu(node_domains, j);
+ sd = &per_cpu(node_domains, j).sd;
sd->groups = sg;
}
sg->__cpu_power = 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 4293cfa..16eeba4e 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -145,6 +145,19 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
read_unlock_irqrestore(&tasklist_lock, flags);
}
+#if defined(CONFIG_CGROUP_SCHED) && \
+ (defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
+static void task_group_path(struct task_group *tg, char *buf, int buflen)
+{
+ /* may be NULL if the underlying cgroup isn't fully-created yet */
+ if (!tg->css.cgroup) {
+ buf[0] = '\0';
+ return;
+ }
+ cgroup_path(tg->css.cgroup, buf, buflen);
+}
+#endif
+
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -154,10 +167,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
unsigned long flags;
#if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
- char path[128] = "";
+ char path[128];
struct task_group *tg = cfs_rq->tg;
- cgroup_path(tg->css.cgroup, path, sizeof(path));
+ task_group_path(tg, path, sizeof(path));
SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
@@ -208,10 +221,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
- char path[128] = "";
+ char path[128];
struct task_group *tg = rt_rq->tg;
- cgroup_path(tg->css.cgroup, path, sizeof(path));
+ task_group_path(tg, path, sizeof(path));
SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
#else
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 954e1a8..da932f4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -960,16 +960,17 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+ const struct cpumask *mask)
{
int first;
/* "this_cpu" is cheaper to preempt than a remote processor */
- if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+ if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
return this_cpu;
- first = first_cpu(*mask);
- if (first != NR_CPUS)
+ first = cpumask_first(mask);
+ if (first < nr_cpu_ids)
return first;
return -1;
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
+ cpumask_var_t domain_mask;
if (task->rt.nr_cpus_allowed == 1)
return -1; /* No other targets possible */
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
if (this_cpu == cpu)
this_cpu = -1; /* Skip this_cpu opt if the same */
- for_each_domain(cpu, sd) {
- if (sd->flags & SD_WAKE_AFFINE) {
- cpumask_t domain_mask;
- int best_cpu;
+ if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+ for_each_domain(cpu, sd) {
+ if (sd->flags & SD_WAKE_AFFINE) {
+ int best_cpu;
- cpumask_and(&domain_mask, sched_domain_span(sd),
- lowest_mask);
+ cpumask_and(domain_mask,
+ sched_domain_span(sd),
+ lowest_mask);
- best_cpu = pick_optimal_cpu(this_cpu,
- &domain_mask);
- if (best_cpu != -1)
- return best_cpu;
+ best_cpu = pick_optimal_cpu(this_cpu,
+ domain_mask);
+
+ if (best_cpu != -1) {
+ free_cpumask_var(domain_mask);
+ return best_cpu;
+ }
+ }
}
+ free_cpumask_var(domain_mask);
}
/*
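The sched_rt.c hunk above moves a cpumask off the kernel stack: with large NR_CPUS a cpumask_t can be sizable, so find_lowest_rq() now heap-allocates it via cpumask_var_t and frees it on every exit path. The alloc/use/free pattern in isolation (a minimal sketch, not taken from the patch):

	#include <linux/cpumask.h>
	#include <linux/slab.h>

	static int example_pick_cpu(const struct cpumask *candidates)
	{
		cpumask_var_t mask;
		int cpu;

		/* can fail (returns false) when CONFIG_CPUMASK_OFFSTACK=y */
		if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
			return -1;

		cpumask_and(mask, candidates, cpu_online_mask);
		cpu = cpumask_first(mask);
		free_cpumask_var(mask);

		return cpu < nr_cpu_ids ? cpu : -1;
	}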
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bdbe9de..0365b48 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -795,6 +795,11 @@ int __init __weak early_irq_init(void)
return 0;
}
+int __init __weak arch_probe_nr_irqs(void)
+{
+ return 0;
+}
+
int __init __weak arch_early_irq_init(void)
{
return 0;
diff --git a/kernel/up.c b/kernel/up.c
new file mode 100644
index 0000000..1ff27a2
--- /dev/null
+++ b/kernel/up.c
@@ -0,0 +1,21 @@
+/*
+ * Uniprocessor-only support functions. The counterpart to kernel/smp.c
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int wait)
+{
+ WARN_ON(cpu != 0);
+
+ local_irq_disable();
+ (func)(info);
+ local_irq_enable();
+
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);
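For context, the new kernel/up.c stub gives uniprocessor builds the semantics SMP callers already rely on: func runs with interrupts disabled, as it would in IPI context on a remote CPU, and the call is always synchronous regardless of the wait flag. A hypothetical caller, for illustration only:

	static void read_counter(void *info)
	{
		*(unsigned long *)info = 42;	/* stand-in for a CPU-local read */
	}

	/* inside some function: runs read_counter() on CPU 0; inline on UP */
	unsigned long val;
	smp_call_function_single(0, read_counter, &val, 1);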