Diffstat (limited to 'kernel')
-rw-r--r--   kernel/backtracetest.c    | 65
-rw-r--r--   kernel/hrtimer.c          |  7
-rw-r--r--   kernel/irq/manage.c       | 33
-rw-r--r--   kernel/irq/proc.c         | 59
-rw-r--r--   kernel/posix-cpu-timers.c |  3
-rw-r--r--   kernel/stacktrace.c       | 14
-rw-r--r--   kernel/time/tick-sched.c  |  8
7 files changed, 158 insertions, 31 deletions
diff --git a/kernel/backtracetest.c b/kernel/backtracetest.c
index d1a7605..a5e026b 100644
--- a/kernel/backtracetest.c
+++ b/kernel/backtracetest.c
@@ -10,30 +10,73 @@
  * of the License.
  */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/delay.h>
+#include <linux/stacktrace.h>
+
+static void backtrace_test_normal(void)
+{
+        printk("Testing a backtrace from process context.\n");
+        printk("The following trace is a kernel self test and not a bug!\n");
 
-static struct timer_list backtrace_timer;
+        dump_stack();
+}
 
-static void backtrace_test_timer(unsigned long data)
+static DECLARE_COMPLETION(backtrace_work);
+
+static void backtrace_test_irq_callback(unsigned long data)
+{
+        dump_stack();
+        complete(&backtrace_work);
+}
+
+static DECLARE_TASKLET(backtrace_tasklet, &backtrace_test_irq_callback, 0);
+
+static void backtrace_test_irq(void)
 {
         printk("Testing a backtrace from irq context.\n");
         printk("The following trace is a kernel self test and not a bug!\n");
-        dump_stack();
+
+        init_completion(&backtrace_work);
+        tasklet_schedule(&backtrace_tasklet);
+        wait_for_completion(&backtrace_work);
+}
+
+#ifdef CONFIG_STACKTRACE
+static void backtrace_test_saved(void)
+{
+        struct stack_trace trace;
+        unsigned long entries[8];
+
+        printk("Testing a saved backtrace.\n");
+        printk("The following trace is a kernel self test and not a bug!\n");
+
+        trace.nr_entries = 0;
+        trace.max_entries = ARRAY_SIZE(entries);
+        trace.entries = entries;
+        trace.skip = 0;
+
+        save_stack_trace(&trace);
+        print_stack_trace(&trace, 0);
+}
+#else
+static void backtrace_test_saved(void)
+{
+        printk("Saved backtrace test skipped.\n");
 }
+#endif
+
 static int backtrace_regression_test(void)
 {
         printk("====[ backtrace testing ]===========\n");
-        printk("Testing a backtrace from process context.\n");
-        printk("The following trace is a kernel self test and not a bug!\n");
-        dump_stack();
 
-        init_timer(&backtrace_timer);
-        backtrace_timer.function = backtrace_test_timer;
-        mod_timer(&backtrace_timer, jiffies + 10);
+        backtrace_test_normal();
+        backtrace_test_irq();
+        backtrace_test_saved();
 
-        msleep(10);
         printk("====[ end of backtrace testing ]====\n");
         return 0;
 }
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 27a83ee..2913a8b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -300,11 +300,10 @@ EXPORT_SYMBOL_GPL(ktime_sub_ns);
  */
 u64 ktime_divns(const ktime_t kt, s64 div)
 {
-        u64 dclc, inc, dns;
+        u64 dclc;
         int sft = 0;
 
-        dclc = dns = ktime_to_ns(kt);
-        inc = div;
+        dclc = ktime_to_ns(kt);
         /* Make sure the divisor is less than 2^32: */
         while (div >> 32) {
                 sft++;
@@ -632,8 +631,6 @@ void clock_was_set(void)
  */
 void hres_timers_resume(void)
 {
-        WARN_ON_ONCE(num_online_cpus() > 1);
-
         /* Retrigger the CPU local events: */
         retrigger_next_event(NULL);
 }
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 46d6611..77a51be 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,8 @@
 
 #ifdef CONFIG_SMP
 
+cpumask_t irq_default_affinity = CPU_MASK_ALL;
+
 /**
  *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *      @irq: interrupt number to wait for
@@ -95,6 +97,27 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
         return 0;
 }
 
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+/*
+ * Generic version of the affinity autoselector.
+ */
+int irq_select_affinity(unsigned int irq)
+{
+        cpumask_t mask;
+
+        if (!irq_can_set_affinity(irq))
+                return 0;
+
+        cpus_and(mask, cpu_online_map, irq_default_affinity);
+
+        irq_desc[irq].affinity = mask;
+        irq_desc[irq].chip->set_affinity(irq, mask);
+
+        set_balance_irq_affinity(irq, mask);
+        return 0;
+}
+#endif
+
 #endif
 
 /**
@@ -354,7 +377,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 
                 /* Setup the type (level, edge polarity) if configured: */
                 if (new->flags & IRQF_TRIGGER_MASK) {
-                        if (desc->chip && desc->chip->set_type)
+                        if (desc->chip->set_type)
                                 desc->chip->set_type(irq,
                                                 new->flags & IRQF_TRIGGER_MASK);
                         else
@@ -364,8 +387,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
                                  */
                                 printk(KERN_WARNING "No IRQF_TRIGGER set_type "
                                        "function for IRQ %d (%s)\n", irq,
-                                       desc->chip ? desc->chip->name :
-                                       "unknown");
+                                       desc->chip->name);
                 } else
                         compat_irq_chip_set_default_handler(desc);
 
@@ -382,6 +404,9 @@ int setup_irq(unsigned int irq, struct irqaction *new)
                 } else
                         /* Undo nested disables: */
                         desc->depth = 1;
+
+                /* Set default affinity mask once everything is setup */
+                irq_select_affinity(irq);
         }
         /* Reset broken irq detection when installing new handler */
         desc->irq_count = 0;
@@ -571,8 +596,6 @@ int request_irq(unsigned int irq, irq_handler_t handler,
         action->next = NULL;
         action->dev_id = dev_id;
 
-        select_smp_affinity(irq);
-
 #ifdef CONFIG_DEBUG_SHIRQ
         if (irqflags & IRQF_SHARED) {
                 /*
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index c2f2ccb..6c6d35d 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -44,7 +44,7 @@ static int irq_affinity_write_proc(struct file *file,
                 const char __user *buffer, unsigned long count, void *data)
 {
         unsigned int irq = (int)(long)data, full_count = count, err;
-        cpumask_t new_value, tmp;
+        cpumask_t new_value;
 
         if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
             irq_balancing_disabled(irq))
@@ -62,17 +62,51 @@ static int irq_affinity_write_proc(struct file *file,
          * way to make the system unusable accidentally :-) At least
          * one online CPU still has to be targeted.
          */
-        cpus_and(tmp, new_value, cpu_online_map);
-        if (cpus_empty(tmp))
+        if (!cpus_intersects(new_value, cpu_online_map))
                 /* Special case for empty set - allow the architecture
                    code to set default SMP affinity. */
-                return select_smp_affinity(irq) ? -EINVAL : full_count;
+                return irq_select_affinity(irq) ? -EINVAL : full_count;
 
         irq_set_affinity(irq, new_value);
 
         return full_count;
 }
 
+static int default_affinity_read(char *page, char **start, off_t off,
+                                 int count, int *eof, void *data)
+{
+        int len = cpumask_scnprintf(page, count, irq_default_affinity);
+        if (count - len < 2)
+                return -EINVAL;
+        len += sprintf(page + len, "\n");
+        return len;
+}
+
+static int default_affinity_write(struct file *file,
+                const char __user *buffer, unsigned long count, void *data)
+{
+        unsigned int full_count = count, err;
+        cpumask_t new_value;
+
+        err = cpumask_parse_user(buffer, count, new_value);
+        if (err)
+                return err;
+
+        if (!is_affinity_mask_valid(new_value))
+                return -EINVAL;
+
+        /*
+         * Do not allow disabling IRQs completely - it's a too easy
+         * way to make the system unusable accidentally :-) At least
+         * one online CPU still has to be targeted.
+         */
+        if (!cpus_intersects(new_value, cpu_online_map))
+                return -EINVAL;
+
+        irq_default_affinity = new_value;
+
+        return full_count;
+}
 #endif
 
 static int irq_spurious_read(char *page, char **start, off_t off,
@@ -171,6 +205,21 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
                 remove_proc_entry(action->dir->name, irq_desc[irq].dir);
 }
 
+void register_default_affinity_proc(void)
+{
+#ifdef CONFIG_SMP
+        struct proc_dir_entry *entry;
+
+        /* create /proc/irq/default_smp_affinity */
+        entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir);
+        if (entry) {
+                entry->data = NULL;
+                entry->read_proc = default_affinity_read;
+                entry->write_proc = default_affinity_write;
+        }
+#endif
+}
+
 void init_irq_proc(void)
 {
         int i;
@@ -180,6 +229,8 @@ void init_irq_proc(void)
         if (!root_irq_dir)
                 return;
 
+        register_default_affinity_proc();
+
         /*
          * Create entries for all existing IRQs.
          */
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index f1525ad..c42a03a 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1037,6 +1037,9 @@ static void check_thread_timers(struct task_struct *tsk,
                                 sig->rlim[RLIMIT_RTTIME].rlim_cur +=
                                                                 USEC_PER_SEC;
                         }
+                        printk(KERN_INFO
+                                "RT Watchdog Timeout: %s[%d]\n",
+                                tsk->comm, task_pid_nr(tsk));
                         __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                 }
         }
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index b71816e..94b527e 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -6,19 +6,21 @@
  * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
 #include <linux/sched.h>
+#include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/stacktrace.h>
 
 void print_stack_trace(struct stack_trace *trace, int spaces)
 {
-        int i, j;
+        int i;
 
-        for (i = 0; i < trace->nr_entries; i++) {
-                unsigned long ip = trace->entries[i];
+        if (WARN_ON(!trace->entries))
+                return;
 
-                for (j = 0; j < spaces + 1; j++)
-                        printk(" ");
-                print_ip_sym(ip);
+        for (i = 0; i < trace->nr_entries; i++) {
+                printk("%*c", 1 + spaces, ' ');
+                print_ip_sym(trace->entries[i]);
         }
 }
+EXPORT_SYMBOL_GPL(print_stack_trace);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d63008b..beef7cc 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -48,6 +48,13 @@ static void tick_do_update_jiffies64(ktime_t now)
         unsigned long ticks = 0;
         ktime_t delta;
 
+        /*
+         * Do a quick check without holding xtime_lock:
+         */
+        delta = ktime_sub(now, last_jiffies_update);
+        if (delta.tv64 < tick_period.tv64)
+                return;
+
         /* Reevalute with xtime_lock held */
         write_seqlock(&xtime_lock);
 
@@ -228,6 +235,7 @@ void tick_nohz_stop_sched_tick(void)
                                local_softirq_pending());
                         ratelimit++;
                 }
+                goto end;
         }
 
         ts->idle_calls++;
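
A note on the hrtimer.c hunk: it only drops dead variables ("inc" and "dns" were assigned but never used), but the loop it leaves behind is worth spelling out. do_div() can only divide by a 32-bit value, so a divisor wider than 32 bits is halved, with the dividend shifted by the same amount, until it fits; the quotient becomes slightly approximate once shifting starts, a trade-off this code accepts. A standalone userspace model of that trick (all names here are invented for the sketch, and plain C division stands in for do_div()):

#include <stdint.h>
#include <stdio.h>

/*
 * Userspace model of the shift trick in ktime_divns(): shrink a
 * 64-bit divisor to 32 bits by shifting divisor and dividend by
 * the same amount, then divide. Bits shifted out of the dividend
 * are lost, so the quotient is approximate, not exact.
 */
static uint64_t divns_model(uint64_t dividend, uint64_t divisor)
{
        int sft = 0;

        /* Make sure the divisor is less than 2^32: */
        while (divisor >> 32) {
                sft++;
                divisor >>= 1;
        }
        return (dividend >> sft) / divisor;
}

int main(void)
{
        uint64_t ns  = 123456789012345ULL;
        uint64_t div = 5000000000ULL;   /* wider than 32 bits */

        printf("model: %llu, exact: %llu\n",
               (unsigned long long)divns_model(ns, div),
               (unsigned long long)(ns / div));
        return 0;
}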
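The net effect of the manage.c and proc.c changes is a new tunable, /proc/irq/default_smp_affinity, which seeds the affinity of newly set-up interrupts via irq_select_affinity(). It uses the same hexadecimal cpumask format as the per-IRQ smp_affinity files, and a write whose mask targets no online CPU is rejected with -EINVAL. A minimal userspace sketch of the interface (the program is hypothetical and needs root on a patched kernel; only the proc path and its semantics come from the patch):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char buf[64];
        FILE *f;

        /* Read the current default mask (hex cpumask, e.g. "ffffffff"). */
        f = fopen("/proc/irq/default_smp_affinity", "r");
        if (!f) {
                perror("/proc/irq/default_smp_affinity");
                return EXIT_FAILURE;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("default affinity: %s", buf);
        fclose(f);

        /* Restrict future IRQs to CPU0+CPU1: mask 0x3. Needs root. */
        f = fopen("/proc/irq/default_smp_affinity", "w");
        if (!f) {
                perror("open for write");
                return EXIT_FAILURE;
        }
        if (fputs("3\n", f) == EOF || fflush(f) == EOF)
                perror("write");        /* EINVAL if no online CPU in mask */
        fclose(f);

        return EXIT_SUCCESS;
}

Note that the mask only affects interrupts set up afterwards (irq_select_affinity() runs from setup_irq()); IRQs that are already requested keep their current affinity.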
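The tick-sched hunk is a lockless fast path: compute the delta without taking xtime_lock, return early if less than a tick period has passed, and only take the lock, re-checking under it, when an update is actually due. The same check-then-lock-then-recheck shape in a self-contained userspace model (all names and values are invented for the sketch; a pthread mutex stands in for the kernel's seqlock):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static int64_t last_update_ns;                  /* protected by update_lock */
static const int64_t period_ns = 1000000;       /* 1 ms "tick period" */

static int64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static void maybe_update(void)
{
        int64_t now = now_ns();

        /*
         * Quick check without holding the lock. The unlocked read is
         * intentionally racy; the decisive check is repeated under
         * the lock, mirroring the "Reevalute with xtime_lock held"
         * step in the kernel code above.
         */
        if (now - last_update_ns < period_ns)
                return;

        pthread_mutex_lock(&update_lock);
        if (now - last_update_ns >= period_ns) {
                last_update_ns = now;
                printf("updated at %lld ns\n", (long long)now);
        }
        pthread_mutex_unlock(&update_lock);
}

int main(void)
{
        /* Two back-to-back calls: only the first should update. */
        maybe_update();
        maybe_update();
        return 0;
}

The payoff is the common case: in the kernel, most calls into tick_do_update_jiffies64() find that no tick boundary has passed, so they now avoid bouncing the xtime_lock cache line entirely.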