| author | Ingo Molnar <mingo@elte.hu> | 2009-05-07 11:17:13 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-05-07 11:17:34 +0200 |
| commit | 44347d947f628060b92449702071bfe1d31dfb75 (patch) | |
| tree | c6ed74610d5b3295df4296659f80f5feb94b28cc /kernel | |
| parent | d94fc523f3c35bd8013f04827e94756cbc0212f4 (diff) | |
| parent | 413f81eba35d6ede9289b0c8a920c013a84fac71 (diff) | |
| download | op-kernel-dev-44347d947f628060b92449702071bfe1d31dfb75.zip op-kernel-dev-44347d947f628060b92449702071bfe1d31dfb75.tar.gz | |
Merge branch 'linus' into tracing/core
Merge reason: tracing/core was on a .30-rc1 base and was missing out on
a handful of tracing fixes present in .30-rc5-almost.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/audit_tree.c         |  3
-rw-r--r-- | kernel/irq/handle.c         |  2
-rw-r--r-- | kernel/irq/manage.c         |  5
-rw-r--r-- | kernel/lockdep.c            | 22
-rw-r--r-- | kernel/panic.c              |  2
-rw-r--r-- | kernel/posix-cpu-timers.c   |  8
-rw-r--r-- | kernel/power/disk.c         | 51
-rw-r--r-- | kernel/power/main.c         | 24
-rw-r--r-- | kernel/power/swap.c         |  2
-rw-r--r-- | kernel/ptrace.c             | 11
-rw-r--r-- | kernel/rcupdate.c           | 18
-rw-r--r-- | kernel/rcutree.c            | 19
-rw-r--r-- | kernel/rcutree_trace.c      | 14
-rw-r--r-- | kernel/resource.c           | 46
-rw-r--r-- | kernel/sched.c              | 12
-rw-r--r-- | kernel/slow-work.c          |  4
-rw-r--r-- | kernel/softirq.c            |  4
-rw-r--r-- | kernel/sys.c                | 24
-rw-r--r-- | kernel/sysctl.c             | 25
-rw-r--r-- | kernel/time/clocksource.c   |  8
-rw-r--r-- | kernel/time/jiffies.c       |  2
-rw-r--r-- | kernel/time/tick-common.c   | 12
-rw-r--r-- | kernel/time/timekeeping.c   | 12
-rw-r--r-- | kernel/trace/trace_branch.c |  8
-rw-r--r-- | kernel/trace/trace_power.c  |  7
25 files changed, 175 insertions, 170 deletions
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 917ab95..6e73517 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -734,9 +734,6 @@ int audit_tag_tree(char *old, char *new)
 		dentry = dget(path.dentry);
 		path_put(&path);
 
-		if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
-			follow_up(&mnt, &dentry);
-
 		list_add_tail(&list, &tagged->mnt_list);
 
 		mutex_lock(&audit_filter_mutex);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e68bb5a..5dd2572 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -360,8 +360,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
-	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7e2e7dd..2734eca 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -109,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(desc->affinity, cpumask);
+	if (desc->status & IRQ_MOVE_PCNTXT)
 		desc->chip->set_affinity(irq, cpumask);
-	} else {
+	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 47b201e..8bbeef9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2492,13 +2492,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	if (unlikely(!debug_locks))
+	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
+
+	if (DEBUG_LOCKS_WARN_ON(!name)) {
+		lock->name = "NULL";
 		return;
+	}
+
+	lock->name = name;
 
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
-	if (DEBUG_LOCKS_WARN_ON(!name))
-		return;
 	/*
 	 * Sanity check, the lock-class key must be persistent:
 	 */
@@ -2507,12 +2514,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
-	lock->name = name;
 	lock->key = key;
-	lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-	lock->cpu = raw_smp_processor_id();
-#endif
+
+	if (unlikely(!debug_locks))
+		return;
+
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
diff --git a/kernel/panic.c b/kernel/panic.c
index 934fb37..3dcaa16 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -221,7 +221,7 @@ void add_taint(unsigned flag)
 	 * post-warning case.
 	 */
 	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
-		printk(KERN_WARNING "Disabling lockdep due to kernel taint\n");
+		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
 
 	set_bit(flag, &tainted_mask);
 }
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c9dcf98..bece7c0 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1420,19 +1420,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 * timer call will interfere.
 	 */
 	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-		int firing;
+		int cpu_firing;
+
 		spin_lock(&timer->it_lock);
 		list_del_init(&timer->it.cpu.entry);
-		firing = timer->it.cpu.firing;
+		cpu_firing = timer->it.cpu.firing;
 		timer->it.cpu.firing = 0;
 		/*
 		 * The firing flag is -1 if we collided with a reset
 		 * of the timer, which already reported this
 		 * almost-firing as an overrun. So don't generate an event.
 		 */
-		if (likely(firing >= 0)) {
+		if (likely(cpu_firing >= 0))
 			cpu_timer_fire(timer);
-		}
 		spin_unlock(&timer->it_lock);
 	}
 }
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 0854770..e71ca9c 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -646,13 +646,6 @@ static int software_resume(void)
 		return 0;
 
 	/*
-	 * We can't depend on SCSI devices being available after loading one of
-	 * their modules if scsi_complete_async_scans() is not called and the
-	 * resume device usually is a SCSI one.
-	 */
-	scsi_complete_async_scans();
-
-	/*
 	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
 	 * is configured into the kernel. Since the regular hibernate
 	 * trigger path is via sysfs which takes a buffer mutex before
@@ -663,32 +656,42 @@ static int software_resume(void)
 	 * here to avoid lockdep complaining.
 	 */
 	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
+
+	if (swsusp_resume_device)
+		goto Check_image;
+
+	if (!strlen(resume_file)) {
+		error = -ENOENT;
+		goto Unlock;
+	}
+
+	pr_debug("PM: Checking image partition %s\n", resume_file);
+
+	/* Check if the device is there */
+	swsusp_resume_device = name_to_dev_t(resume_file);
 	if (!swsusp_resume_device) {
-		if (!strlen(resume_file)) {
-			mutex_unlock(&pm_mutex);
-			return -ENOENT;
-		}
 		/*
 		 * Some device discovery might still be in progress; we need
 		 * to wait for this to finish.
 		 */
 		wait_for_device_probe();
+		/*
+		 * We can't depend on SCSI devices being available after loading
+		 * one of their modules until scsi_complete_async_scans() is
+		 * called and the resume device usually is a SCSI one.
+		 */
+		scsi_complete_async_scans();
+
 		swsusp_resume_device = name_to_dev_t(resume_file);
-		pr_debug("PM: Resume from partition %s\n", resume_file);
-	} else {
-		pr_debug("PM: Resume from partition %d:%d\n",
-			MAJOR(swsusp_resume_device),
-			MINOR(swsusp_resume_device));
+		if (!swsusp_resume_device) {
+			error = -ENODEV;
+			goto Unlock;
+		}
 	}
 
-	if (noresume) {
-		/**
-		 * FIXME: If noresume is specified, we need to find the
-		 * partition and reset it back to normal swap space.
-		 */
-		mutex_unlock(&pm_mutex);
-		return 0;
-	}
+ Check_image:
+	pr_debug("PM: Resume from partition %d:%d\n",
+		 MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
 	pr_debug("PM: Checking hibernation image.\n");
 	error = swsusp_check();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f172f41..f99ed6a 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -291,20 +291,26 @@ static int suspend_enter(suspend_state_t state)
 
 	device_pm_lock();
 
+	if (suspend_ops->prepare) {
+		error = suspend_ops->prepare();
+		if (error)
+			goto Done;
+	}
+
 	error = device_power_down(PMSG_SUSPEND);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down\n");
-		goto Done;
+		goto Platfrom_finish;
 	}
 
-	if (suspend_ops->prepare) {
-		error = suspend_ops->prepare();
+	if (suspend_ops->prepare_late) {
+		error = suspend_ops->prepare_late();
 		if (error)
 			goto Power_up_devices;
 	}
 
 	if (suspend_test(TEST_PLATFORM))
-		goto Platfrom_finish;
+		goto Platform_wake;
 
 	error = disable_nonboot_cpus();
 	if (error || suspend_test(TEST_CPUS))
@@ -326,13 +332,17 @@ static int suspend_enter(suspend_state_t state)
  Enable_cpus:
 	enable_nonboot_cpus();
 
- Platfrom_finish:
-	if (suspend_ops->finish)
-		suspend_ops->finish();
+ Platform_wake:
+	if (suspend_ops->wake)
+		suspend_ops->wake();
 
  Power_up_devices:
 	device_power_up(PMSG_RESUME);
 
+ Platfrom_finish:
+	if (suspend_ops->finish)
+		suspend_ops->finish();
+
  Done:
 	device_pm_unlock();
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 505f319..8ba052c 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -64,8 +64,6 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-	if (!bio)
-		return -ENOMEM;
 	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
 	bio->bi_bdev = resume_bdev;
 	bio->bi_end_io = end_swap_bio_read;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 64191fa..0692ab5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -188,7 +188,7 @@ int ptrace_attach(struct task_struct *task)
 	/* Protect exec's credential calculations against our interference;
 	 * SUID, SGID and LSM creds get determined differently under ptrace.
 	 */
-	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+	retval = mutex_lock_interruptible(&task->cred_exec_mutex);
 	if (retval < 0)
 		goto out;
@@ -232,7 +232,7 @@ repeat:
 bad:
 	write_unlock_irqrestore(&tasklist_lock, flags);
 	task_unlock(task);
-	mutex_unlock(&current->cred_exec_mutex);
+	mutex_unlock(&task->cred_exec_mutex);
 out:
 	return retval;
 }
@@ -604,10 +604,11 @@ repeat:
 	ret = security_ptrace_traceme(current->parent);
 
 	/*
-	 * Set the ptrace bit in the process ptrace flags.
-	 * Then link us on our parent's ptraced list.
+	 * Check PF_EXITING to ensure ->real_parent has not passed
+	 * exit_ptrace(). Otherwise we don't report the error but
+	 * pretend ->real_parent untraces us right after return.
 	 */
-	if (!ret) {
+	if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 		current->ptrace |= PT_PTRACED;
 		__ptrace_link(current, current->real_parent);
 	}
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c7b845..a967c9f 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
 int rcu_scheduler_active __read_mostly;
 
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -122,7 +126,10 @@ static void rcu_barrier_func(void *type)
 	}
 }
 
-static inline void wait_migrated_callbacks(void);
+static inline void wait_migrated_callbacks(void)
+{
+	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+}
 
 /*
  * Orchestrate the specified type of RCU barrier, waiting for all
@@ -179,21 +186,12 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
-
 static void rcu_migrate_callback(struct rcu_head *notused)
 {
 	if (atomic_dec_and_test(&rcu_migrate_type_count))
 		wake_up(&rcu_migrate_wq);
 }
 
-static inline void wait_migrated_callbacks(void)
-{
-	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-}
-
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7f32669..d2a372f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -530,8 +530,6 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
 	rdp->qs_pending = 1;
 	rdp->passed_quiesc = 0;
 	rdp->gpnum = rsp->gpnum;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-		RCU_JIFFIES_TILL_FORCE_QS;
 }
 
 /*
@@ -578,8 +576,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	rsp->gpnum++;
 	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-		RCU_JIFFIES_TILL_FORCE_QS;
 	record_gp_stall_check_time(rsp);
 	dyntick_record_completed(rsp, rsp->completed - 1);
 	note_new_gpnum(rsp, rdp);
@@ -1055,7 +1051,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 {
 	unsigned long flags;
 	long lastcomp;
-	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	u8 signaled;
 
@@ -1066,16 +1061,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 		return;	/* Someone else is already on the job. */
 	}
 	if (relaxed &&
-	    (long)(rsp->jiffies_force_qs - jiffies) >= 0 &&
-	    (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0)
+	    (long)(rsp->jiffies_force_qs - jiffies) >= 0)
 		goto unlock_ret; /* no emergency and done recently. */
 	rsp->n_force_qs++;
 	spin_lock(&rnp->lock);
 	lastcomp = rsp->completed;
 	signaled = rsp->signaled;
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-		RCU_JIFFIES_TILL_FORCE_QS;
 	if (lastcomp == rsp->gpnum) {
 		rsp->n_force_qs_ngp++;
 		spin_unlock(&rnp->lock);
@@ -1144,8 +1136,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * If an RCU GP has gone long enough, go check for dyntick
 	 * idle CPUs and, if needed, send resched IPIs.
 	 */
-	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-	    (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 
 	/*
@@ -1230,8 +1221,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	if (unlikely(++rdp->qlen > qhimark)) {
 		rdp->blimit = LONG_MAX;
 		force_quiescent_state(rsp, 0);
-	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-		   (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
 }
@@ -1290,8 +1280,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Has an RCU GP gone long enough to send resched IPIs &c? */
 	if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
-	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-	     (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0))
+	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0))
 		return 1;
 
 	/* nothing to do */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 4ee954f..4b1875b 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -49,14 +49,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
 	if (!rdp->beenonline)
 		return;
-	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
+	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d",
 		   rdp->cpu,
 		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
 		   rdp->completed, rdp->gpnum,
 		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
-		   rdp->qs_pending,
-		   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
-		   (int)(rdp->n_rcu_pending & 0xffff));
+		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
 	seq_printf(m, " dt=%d/%d dn=%d df=%lu",
 		   rdp->dynticks->dynticks,
@@ -102,14 +100,12 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 {
 	if (!rdp->beenonline)
 		return;
-	seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
+	seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d",
 		   rdp->cpu,
 		   cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
 		   rdp->completed, rdp->gpnum,
 		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
-		   rdp->qs_pending,
-		   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
-		   rdp->n_rcu_pending);
+		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
 	seq_printf(m, ",%d,%d,%d,%lu",
 		   rdp->dynticks->dynticks,
@@ -123,7 +119,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 
 static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
-	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",\"rpfq\",\"rp\",");
+	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
 #ifdef CONFIG_NO_HZ
 	seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
diff --git a/kernel/resource.c b/kernel/resource.c
index fd5d7d5..ac5f3a3 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -533,43 +533,21 @@ static void __init __reserve_region_with_split(struct resource *root,
 	res->end = end;
 	res->flags = IORESOURCE_BUSY;
 
-	for (;;) {
-		conflict = __request_resource(parent, res);
-		if (!conflict)
-			break;
-		if (conflict != parent) {
-			parent = conflict;
-			if (!(conflict->flags & IORESOURCE_BUSY))
-				continue;
-		}
-
-		/* Uhhuh, that didn't work out.. */
-		kfree(res);
-		res = NULL;
-		break;
-	}
-
-	if (!res) {
-		/* failed, split and try again */
-
-		/* conflict covered whole area */
-		if (conflict->start <= start && conflict->end >= end)
-			return;
+	conflict = __request_resource(parent, res);
+	if (!conflict)
+		return;
 
-		if (conflict->start > start)
-			__reserve_region_with_split(root, start, conflict->start-1, name);
-		if (!(conflict->flags & IORESOURCE_BUSY)) {
-			resource_size_t common_start, common_end;
+	/* failed, split and try again */
+	kfree(res);
 
-			common_start = max(conflict->start, start);
-			common_end = min(conflict->end, end);
-			if (common_start < common_end)
-				__reserve_region_with_split(root, common_start, common_end, name);
-		}
-		if (conflict->end < end)
-			__reserve_region_with_split(root, conflict->end+1, end, name);
-	}
+	/* conflict covered whole area */
+	if (conflict->start <= start && conflict->end >= end)
+		return;
+
+	if (conflict->start > start)
+		__reserve_region_with_split(root, start, conflict->start-1, name);
+	if (conflict->end < end)
+		__reserve_region_with_split(root, conflict->end+1, end, name);
 }
 
 void __init reserve_region_with_split(struct resource *root,
diff --git a/kernel/sched.c b/kernel/sched.c
index 9cdedbd..14a19b1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4728,7 +4728,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
 	if (user_tick)
 		account_user_time(p, one_jiffy, one_jiffy_scaled);
-	else if (p != rq->idle)
+	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
 		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
 				    one_jiffy_scaled);
 	else
@@ -4842,7 +4842,7 @@ void scheduler_tick(void)
 #endif
 }
 
-unsigned long get_parent_ip(unsigned long addr)
+notrace unsigned long get_parent_ip(unsigned long addr)
 {
 	if (in_lock_functions(addr)) {
 		addr = CALLER_ADDR2;
@@ -7363,8 +7363,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
-		printk(KERN_CONT " %s (__cpu_power = %d)", str,
-						group->__cpu_power);
+
+		printk(KERN_CONT " %s", str);
+		if (group->__cpu_power != SCHED_LOAD_SCALE) {
+			printk(KERN_CONT " (__cpu_power = %d)",
+				group->__cpu_power);
+		}
 
 		group = group->next;
 	} while (group != sd->groups);
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index cf2bc01..b28d1913 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -609,14 +609,14 @@ void slow_work_unregister_user(void)
 	if (slow_work_user_count == 0) {
 		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
 		slow_work_threads_should_exit = true;
+		del_timer_sync(&slow_work_cull_timer);
+		del_timer_sync(&slow_work_oom_timer);
 		wake_up_all(&slow_work_thread_wq);
 		wait_for_completion(&slow_work_last_thread_exited);
 		printk(KERN_NOTICE "Slow work thread pool:"
 		       " Shut down complete\n");
 	}
 
-	del_timer_sync(&slow_work_cull_timer);
-
 	mutex_unlock(&slow_work_user_lock);
 }
 EXPORT_SYMBOL(slow_work_unregister_user);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d4ba347..dc4d0cf 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -471,9 +471,9 @@ void tasklet_kill(struct tasklet_struct *t)
 		printk("Attempt to kill tasklet from interrupt\n");
 
 	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
-		do
+		do {
 			yield();
-		while (test_bit(TASKLET_STATE_SCHED, &t->state));
+		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
 	}
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
diff --git a/kernel/sys.c b/kernel/sys.c
index 51dbb55..e7998cf 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -360,6 +360,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 		void __user *, arg)
 {
 	char buffer[256];
+	int ret = 0;
 
 	/* We only trust the superuser with rebooting the system. */
 	if (!capable(CAP_SYS_BOOT))
@@ -397,7 +398,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 		kernel_halt();
 		unlock_kernel();
 		do_exit(0);
-		break;
+		panic("cannot halt");
 
 	case LINUX_REBOOT_CMD_POWER_OFF:
 		kernel_power_off();
@@ -417,29 +418,22 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 
 #ifdef CONFIG_KEXEC
 	case LINUX_REBOOT_CMD_KEXEC:
-		{
-			int ret;
-			ret = kernel_kexec();
-			unlock_kernel();
-			return ret;
-		}
+		ret = kernel_kexec();
+		break;
 #endif
 
 #ifdef CONFIG_HIBERNATION
 	case LINUX_REBOOT_CMD_SW_SUSPEND:
-		{
-			int ret = hibernate();
-			unlock_kernel();
-			return ret;
-		}
+		ret = hibernate();
+		break;
 #endif
 
 	default:
-		unlock_kernel();
-		return -EINVAL;
+		ret = -EINVAL;
+		break;
 	}
 
 	unlock_kernel();
-	return 0;
+	return ret;
 }
 
 static void deferred_cad(struct work_struct *dummy)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4286b62..ea78fa1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -103,6 +103,9 @@ static unsigned long one_ul = 1;
 static int one_hundred = 100;
 static int one_thousand = 1000;
 
+/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
+
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
@@ -902,16 +905,6 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "scan_unevictable_pages",
-		.data		= &scan_unevictable_pages,
-		.maxlen		= sizeof(scan_unevictable_pages),
-		.mode		= 0644,
-		.proc_handler	= &scan_unevictable_handler,
-	},
-#endif
 #ifdef CONFIG_SLOW_WORK
 	{
 		.ctl_name	= CTL_UNNUMBERED,
@@ -1016,7 +1009,7 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &dirty_bytes_handler,
 		.strategy	= &sysctl_intvec,
-		.extra1		= &one_ul,
+		.extra1		= &dirty_bytes_min,
 	},
 	{
 		.procname	= "dirty_writeback_centisecs",
@@ -1302,6 +1295,16 @@ static struct ctl_table vm_table[] = {
 		.extra2		= &one,
 	},
 #endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "scan_unevictable_pages",
+		.data		= &scan_unevictable_pages,
+		.maxlen		= sizeof(scan_unevictable_pages),
+		.mode		= 0644,
+		.proc_handler	= &scan_unevictable_handler,
+	},
+#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c46c931..ecfd7b5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data)
 
 	resumed = test_and_clear_bit(0, &watchdog_resumed);
 
-	wdnow = watchdog->read();
+	wdnow = watchdog->read(watchdog);
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
-		csnow = cs->read();
+		csnow = cs->read(cs);
 
 		if (unlikely(resumed)) {
 			cs->wd_last = csnow;
@@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 
 		list_add(&cs->wd_list, &watchdog_list);
 		if (!started && watchdog) {
-			watchdog_last = watchdog->read();
+			watchdog_last = watchdog->read(watchdog);
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 			add_timer_on(&watchdog_timer,
 				     cpumask_first(cpu_online_mask));
@@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 			cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
 		/* Start if list is not empty */
 		if (!list_empty(&watchdog_list)) {
-			watchdog_last = watchdog->read();
+			watchdog_last = watchdog->read(watchdog);
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 			add_timer_on(&watchdog_timer,
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 06f1975..c3f6c30 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -50,7 +50,7 @@
  */
 #define JIFFIES_SHIFT	8
 
-static cycle_t jiffies_read(void)
+static cycle_t jiffies_read(struct clocksource *cs)
 {
 	return (cycle_t) jiffies;
 }
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 21a5ca8..83c4417 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
 	for (;;) {
 		if (!clockevents_program_event(dev, next, ktime_get()))
 			return;
-		tick_periodic(cpu);
+		/*
+		 * Have to be careful here. If we're in oneshot mode,
+		 * before we call tick_periodic() in a loop, we need
+		 * to be sure we're using a real hardware clocksource.
+		 * Otherwise we could get trapped in an infinite
+		 * loop, as the tick_periodic() increments jiffies,
+		 * when then will increment time, posibly causing
+		 * the loop to trigger again and again.
+		 */
+		if (timekeeping_valid_for_hres())
+			tick_periodic(cpu);
 		next = ktime_add(next, tick_period);
 	}
 }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 900f1b6..687dff4 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday);
 */
 static void change_clocksource(void)
 {
-	struct clocksource *new;
+	struct clocksource *new, *old;
 
 	new = clocksource_get_next();
 
@@ -191,11 +191,16 @@ static void change_clocksource(void)
 
 	clocksource_forward_now();
 
-	new->raw_time = clock->raw_time;
+	if (clocksource_enable(new))
+		return;
 
+	new->raw_time = clock->raw_time;
+	old = clock;
 	clock = new;
+	clocksource_disable(old);
 
+	clock->cycle_last = 0;
-	clock->cycle_last = clocksource_read(new);
+	clock->cycle_last = clocksource_read(clock);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -292,6 +297,7 @@ void __init timekeeping_init(void)
 	ntp_init();
 
 	clock = clocksource_get_next();
+	clocksource_enable(clock);
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
 	clock->cycle_last = clocksource_read(clock);
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 8e64e60..7a7a9fd 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -157,6 +157,13 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter,
 	return TRACE_TYPE_HANDLED;
 }
 
+static void branch_print_header(struct seq_file *s)
+{
+	seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
+		"  FUNC:FILE:LINE\n");
+	seq_puts(s, "#              | |       |          |         |"
+		"    |\n");
+}
 
 static struct trace_event trace_branch_event = {
 	.type		= TRACE_BRANCH,
@@ -171,6 +178,7 @@ static struct tracer branch_trace __read_mostly =
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_branch,
 #endif /* CONFIG_FTRACE_SELFTEST */
+	.print_header	= branch_print_header,
 };
 
 __init static int init_branch_tracer(void)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 810a5b7..8a30d98 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -190,6 +190,12 @@ static enum print_line_t power_print_line(struct trace_iterator *iter)
 	return TRACE_TYPE_UNHANDLED;
 }
 
+static void power_print_header(struct seq_file *s)
+{
+	seq_puts(s, "#   TIMESTAMP      STATE  EVENT\n");
+	seq_puts(s, "#       |            |      |\n");
+}
+
 static struct tracer power_tracer __read_mostly =
 {
 	.name		= "power",
@@ -198,6 +204,7 @@ static struct tracer power_tracer __read_mostly =
 	.stop		= stop_power_trace,
 	.reset		= power_trace_reset,
 	.print_line	= power_print_line,
+	.print_header	= power_print_header,
 };
 
 static int init_power_trace(void)
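Several hunks above (kernel/time/clocksource.c and kernel/time/jiffies.c) reflect a clocksource API change merged here: the read() callback now receives a pointer to the clocksource being read instead of taking no arguments. The following is a minimal sketch, not part of the patch, of a clocksource written against that updated signature; it is modeled on the jiffies clocksource in the diff, and the name "demo" and the rating/shift values are illustrative assumptions:

```c
#include <linux/clocksource.h>
#include <linux/jiffies.h>

/* New-style read callback: the clocksource instance is passed in. */
static cycle_t demo_read(struct clocksource *cs)
{
	/* A real driver would sample its hardware counter here;
	 * this sketch just returns the jiffies counter. */
	return (cycle_t) jiffies;
}

static struct clocksource demo_clocksource = {
	.name	= "demo",		/* hypothetical name */
	.rating	= 1,			/* lowest rating: last-resort source */
	.read	= demo_read,		/* takes struct clocksource * now */
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 8,			/* illustrative value */
};
```

Passing the clocksource into read() lets one callback serve several registered instances (for example, telling apart which hardware counter to sample) instead of relying on file-scope state.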