Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cpuset.c                      29
-rw-r--r--   kernel/events/core.c                 12
-rw-r--r--   kernel/jump_label.c                   3
-rw-r--r--   kernel/lockdep.c                      8
-rw-r--r--   kernel/printk.c                       3
-rw-r--r--   kernel/sched_fair.c                  14
-rw-r--r--   kernel/sysctl_binary.c                2
-rw-r--r--   kernel/timer.c                        2
-rw-r--r--   kernel/trace/ftrace.c                 5
-rw-r--r--   kernel/trace/trace_events.c           1
-rw-r--r--   kernel/trace/trace_events_filter.c    7
11 files changed, 62 insertions, 24 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c4..0b1712d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
 			    struct cpuset, css);
 }
 
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+	return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+	return false;
+}
+#endif
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
 					nodemask_t *newmems)
 {
-	bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+	bool need_loop;
 
 repeat:
 	/*
@@ -962,6 +975,14 @@ repeat:
 		return;
 
 	task_lock(tsk);
+	/*
+	 * Determine if a loop is necessary if another thread is doing
+	 * get_mems_allowed(). If at least one node remains unchanged and
+	 * tsk does not have a mempolicy, then an empty nodemask will not be
+	 * possible when mems_allowed is larger than a word.
+	 */
+	need_loop = task_has_mempolicy(tsk) ||
+			!nodes_intersects(*newmems, tsk->mems_allowed);
 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
@@ -981,11 +1002,9 @@ repeat:
 
 	/*
 	 * Allocation of memory is very fast, we needn't sleep when waiting
-	 * for the read-side. No wait is necessary, however, if at least one
-	 * node remains unchanged.
+	 * for the read-side.
 	 */
-	while (masks_disjoint &&
-			ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+	while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
 		task_unlock(tsk);
 		if (!task_curr(tsk))
 			yield();
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 600c162..58690af 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2174,11 +2174,11 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 */
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-	perf_event_sched_in(cpuctx, ctx, task);
-
 	if (ctx->nr_events)
 		cpuctx->task_ctx = ctx;
 
+	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
+
 	perf_pmu_enable(ctx->pmu);
 	perf_ctx_unlock(cpuctx, ctx);
 }
@@ -3558,9 +3558,13 @@ static void ring_buffer_wakeup(struct perf_event *event)
 	struct ring_buffer *rb;
 
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
-	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+	if (!rb)
+		goto unlock;
+
+	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
 		wake_up_all(&event->waitq);
-	}
+
+unlock:
 	rcu_read_unlock();
 }
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bbdfe2a..66ff710 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
 	if (atomic_inc_not_zero(&key->enabled))
 		return;
 
 	jump_label_lock();
-	if (atomic_add_return(1, &key->enabled) == 1)
+	if (atomic_read(&key->enabled) == 0)
 		jump_label_update(key, JUMP_LABEL_ENABLE);
+	atomic_inc(&key->enabled);
 	jump_label_unlock();
 }
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e69434b..b2e08c9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -44,6 +44,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/sections.h>
 
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	memset(lock, 0, sizeof(*lock));
+	int i;
+
+	kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+		lock->class_cache[i] = NULL;
 
 #ifdef CONFIG_LOCK_STAT
 	lock->cpu = raw_smp_processor_id();
diff --git a/kernel/printk.c b/kernel/printk.c
index 1455a0d..7982a0a 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1293,10 +1293,11 @@ again:
 	raw_spin_lock(&logbuf_lock);
 	if (con_start != log_end)
 		retry = 1;
+	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
 	if (retry && console_trylock())
 		goto again;
 
-	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 	if (wake_klogd)
 		wake_up_klogd();
 }
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a78ed27..8a39fa3 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2352,13 +2352,11 @@ again:
 		if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
 			continue;
 
-		if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
-			if (!smt) {
-				smt = 1;
-				goto again;
-			}
+		if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+			break;
+
+		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
 			break;
-		}
 
 		sg = sd->groups;
 		do {
@@ -2378,6 +2376,10 @@ next:
 			sg = sg->next;
 		} while (sg != sd->groups);
 	}
+	if (!smt) {
+		smt = 1;
+		goto again;
+	}
 done:
 	rcu_read_unlock();
 
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 6318b51..a650694 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
 
 	fput(file);
 out_putname:
-	putname(pathname);
+	__putname(pathname);
 out:
 	return result;
 }
diff --git a/kernel/timer.c b/kernel/timer.c
index dbaa624..9c3c62b 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
 	int pid;
 
 	rcu_read_lock();
-	pid = task_tgid_vnr(current->real_parent);
+	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
 	rcu_read_unlock();
 
 	return pid;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 900b409..b1e8943 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
 	ftrace_pid_function = ftrace_stub;
 }
 
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 	if (!src->count) {
 		free_ftrace_hash_rcu(*dst);
 		rcu_assign_pointer(*dst, EMPTY_HASH);
-		return 0;
+		/* still need to update the function records */
+		ret = 0;
+		goto out;
 	}
 
 	/*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 581876f..c212a7f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
 	/* First see if we did not already create this dir */
 	list_for_each_entry(system, &event_subsystems, list) {
 		if (strcmp(system->name, name) == 0) {
-			__get_system(system);
 			system->nr_events++;
 			return system->entry;
 		}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index d6e7926..95dc31e 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system,
 		 */
 		err = replace_preds(call, NULL, ps, filter_string, true);
 		if (err)
-			goto fail;
+			call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+		else
+			call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
 	}
 
 	list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system,
 		if (strcmp(call->class->system, system->name) != 0)
 			continue;
 
+		if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+			continue;
+
 		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
 		if (!filter_item)
 			goto fail_mem;