author     Ingo Molnar <mingo@kernel.org>  2015-09-18 09:24:01 +0200
committer  Ingo Molnar <mingo@kernel.org>  2015-09-18 09:24:01 +0200
commit     02386c356af0ce5bbee11ed9b23c312ca60298f0 (patch)
tree       24d6816d783975cbc237af3a53c34f44fda03de2 /kernel
parent     d71b0ad8d30922ccdd0705318237e6890b4ec1b6 (diff)
parent     f73e22ab450140830005581c2c7ec389791a1b8d (diff)
Merge branch 'perf/urgent' into perf/core, to pick up fixes before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c        114
-rw-r--r--  kernel/locking/qspinlock.c    2
-rw-r--r--  kernel/sched/core.c          36
-rw-r--r--  kernel/time/clockevents.c    42
-rw-r--r--  kernel/time/tick-common.c     1
-rw-r--r--  kernel/time/tick-sched.c     15
-rw-r--r--  kernel/time/timekeeping.c     2
-rw-r--r--  kernel/time/timer_list.c     54
8 files changed, 150 insertions(+), 116 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 55b0f7c..f87b434 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event)
PERF_EVENT_STATE_INACTIVE;
}
-/*
- * Called at perf_event creation and when events are attached/detached from a
- * group.
- */
-static void perf_event__read_size(struct perf_event *event)
+static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
{
int entry = sizeof(u64); /* value */
int size = 0;
@@ -1263,7 +1259,7 @@ static void perf_event__read_size(struct perf_event *event)
entry += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_GROUP) {
- nr += event->group_leader->nr_siblings;
+ nr += nr_siblings;
size += sizeof(u64);
}
@@ -1271,14 +1267,11 @@ static void perf_event__read_size(struct perf_event *event)
event->read_size = size;
}
-static void perf_event__header_size(struct perf_event *event)
+static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
{
struct perf_sample_data *data;
- u64 sample_type = event->attr.sample_type;
u16 size = 0;
- perf_event__read_size(event);
-
if (sample_type & PERF_SAMPLE_IP)
size += sizeof(data->ip);
@@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event)
event->header_size = size;
}
+/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+static void perf_event__header_size(struct perf_event *event)
+{
+ __perf_event_read_size(event,
+ event->group_leader->nr_siblings);
+ __perf_event_header_size(event, event->attr.sample_type);
+}
+
static void perf_event__id_header_size(struct perf_event *event)
{
struct perf_sample_data *data;
@@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event)
event->id_header_size = size;
}
+static bool perf_event_validate_size(struct perf_event *event)
+{
+ /*
+ * The values computed here will be over-written when we actually
+ * attach the event.
+ */
+ __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+ __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+ perf_event__id_header_size(event);
+
+ /*
+ * Sum the lot; should not exceed the 64k limit we have on records.
+ * Conservative limit to allow for callchains and other variable fields.
+ */
+ if (event->read_size + event->header_size +
+ event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+ return false;
+
+ return true;
+}
+
static void perf_group_attach(struct perf_event *event)
{
struct perf_event *group_leader = event->group_leader, *pos;
@@ -8411,13 +8436,35 @@ SYSCALL_DEFINE5(perf_event_open,
if (move_group) {
gctx = group_leader->ctx;
+ mutex_lock_double(&gctx->mutex, &ctx->mutex);
+ } else {
+ mutex_lock(&ctx->mutex);
+ }
+ if (!perf_event_validate_size(event)) {
+ err = -E2BIG;
+ goto err_locked;
+ }
+
+ /*
+ * Must be under the same ctx::mutex as perf_install_in_context(),
+ * because we need to serialize with concurrent event creation.
+ */
+ if (!exclusive_event_installable(event, ctx)) {
+ /* exclusive and group stuff are assumed mutually exclusive */
+ WARN_ON_ONCE(move_group);
+
+ err = -EBUSY;
+ goto err_locked;
+ }
+
+ WARN_ON_ONCE(ctx->parent_ctx);
+
+ if (move_group) {
/*
* See perf_event_ctx_lock() for comments on the details
* of swizzling perf_event::ctx.
*/
- mutex_lock_double(&gctx->mutex, &ctx->mutex);
-
perf_remove_from_context(group_leader, false);
list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -8425,13 +8472,7 @@ SYSCALL_DEFINE5(perf_event_open,
perf_remove_from_context(sibling, false);
put_ctx(gctx);
}
- } else {
- mutex_lock(&ctx->mutex);
- }
- WARN_ON_ONCE(ctx->parent_ctx);
-
- if (move_group) {
/*
* Wait for everybody to stop referencing the events through
* the old lists, before installing it on new lists.
@@ -8463,22 +8504,29 @@ SYSCALL_DEFINE5(perf_event_open,
perf_event__state_init(group_leader);
perf_install_in_context(ctx, group_leader, group_leader->cpu);
get_ctx(ctx);
- }
- if (!exclusive_event_installable(event, ctx)) {
- err = -EBUSY;
- mutex_unlock(&ctx->mutex);
- fput(event_file);
- goto err_context;
+ /*
+ * Now that all events are installed in @ctx, nothing
+ * references @gctx anymore, so drop the last reference we have
+ * on it.
+ */
+ put_ctx(gctx);
}
+ /*
+ * Precalculate sample_data sizes; do while holding ctx::mutex such
+ * that we're serialized against further additions and before
+ * perf_install_in_context() which is the point the event is active and
+ * can use these values.
+ */
+ perf_event__header_size(event);
+ perf_event__id_header_size(event);
+
perf_install_in_context(ctx, event, event->cpu);
perf_unpin_context(ctx);
- if (move_group) {
+ if (move_group)
mutex_unlock(&gctx->mutex);
- put_ctx(gctx);
- }
mutex_unlock(&ctx->mutex);
put_online_cpus();
@@ -8490,12 +8538,6 @@ SYSCALL_DEFINE5(perf_event_open,
mutex_unlock(&current->perf_event_mutex);
/*
- * Precalculate sample_data sizes
- */
- perf_event__header_size(event);
- perf_event__id_header_size(event);
-
- /*
* Drop the reference on the group_event after placing the
* new event on the sibling_list. This ensures destruction
* of the group leader will find the pointer to itself in
@@ -8505,6 +8547,12 @@ SYSCALL_DEFINE5(perf_event_open,
fd_install(event_fd, event_file);
return event_fd;
+err_locked:
+ if (move_group)
+ mutex_unlock(&gctx->mutex);
+ mutex_unlock(&ctx->mutex);
+/* err_file: */
+ fput(event_file);
err_context:
perf_unpin_context(ctx);
put_ctx(ctx);
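
A note on the events/core.c hunks above: the new perf_event_validate_size() is purely a size-budget check. It sums the worst-case read_size, header_size and id_header_size plus the record header and rejects the event when the total reaches 16 KiB, keeping well clear of the 64k record limit even with callchains and other variable fields. The standalone sketch below restates that arithmetic; the struct is a trimmed stand-in, not the real struct perf_event, and the 8-byte header constant is an assumption standing in for sizeof(struct perf_event_header).

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed stand-in for the three sizes perf_event_validate_size() sums. */
struct fake_event {
	uint16_t read_size;      /* PERF_FORMAT_* payload                   */
	uint16_t header_size;    /* PERF_SAMPLE_* payload                   */
	uint16_t id_header_size; /* sample_id appended to side-band records */
};

/* Same budget check as the hunk above: the total, including the record
 * header, must stay below a conservative 16 KiB bound. */
static bool validate_size(const struct fake_event *e, size_t header)
{
	return e->read_size + e->header_size + e->id_header_size + header
	       < 16 * 1024;
}

int main(void)
{
	struct fake_event e = {
		.read_size      = 8 * 1024,
		.header_size    = 8 * 1024,
		.id_header_size = 64,
	};

	/* 8 bytes stands in for sizeof(struct perf_event_header). */
	printf("fits: %d\n", validate_size(&e, 8)); /* prints 0: rejected */
	return 0;
}
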
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 337c881..87e9ce6a 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
if (pv_enabled())
goto queue;
- if (virt_queued_spin_lock(lock))
+ if (virt_spin_lock(lock))
return;
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3595403..97d276f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -621,18 +621,21 @@ int get_nohz_timer_target(void)
int i, cpu = smp_processor_id();
struct sched_domain *sd;
- if (!idle_cpu(cpu))
+ if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
return cpu;
rcu_read_lock();
for_each_domain(cpu, sd) {
for_each_cpu(i, sched_domain_span(sd)) {
- if (!idle_cpu(i)) {
+ if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
cpu = i;
goto unlock;
}
}
}
+
+ if (!is_housekeeping_cpu(cpu))
+ cpu = housekeeping_any_cpu();
unlock:
rcu_read_unlock();
return cpu;
@@ -5178,24 +5181,47 @@ static void migrate_tasks(struct rq *dead_rq)
break;
/*
- * Ensure rq->lock covers the entire task selection
- * until the migration.
+ * pick_next_task assumes pinned rq->lock.
*/
lockdep_pin_lock(&rq->lock);
next = pick_next_task(rq, &fake_task);
BUG_ON(!next);
next->sched_class->put_prev_task(rq, next);
+ /*
+ * Rules for changing task_struct::cpus_allowed are holding
+ * both pi_lock and rq->lock, such that holding either
+ * stabilizes the mask.
+ *
+ * Drop rq->lock is not quite as disastrous as it usually is
+ * because !cpu_active at this point, which means load-balance
+ * will not interfere. Also, stop-machine.
+ */
+ lockdep_unpin_lock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
+ raw_spin_lock(&next->pi_lock);
+ raw_spin_lock(&rq->lock);
+
+ /*
+ * Since we're inside stop-machine, _nothing_ should have
+ * changed the task, WARN if weird stuff happened, because in
+ * that case the above rq->lock drop is a fail too.
+ */
+ if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
+ raw_spin_unlock(&next->pi_lock);
+ continue;
+ }
+
/* Find suitable destination for @next, with force if needed. */
dest_cpu = select_fallback_rq(dead_rq->cpu, next);
- lockdep_unpin_lock(&rq->lock);
rq = __migrate_task(rq, next, dest_cpu);
if (rq != dead_rq) {
raw_spin_unlock(&rq->lock);
rq = dead_rq;
raw_spin_lock(&rq->lock);
}
+ raw_spin_unlock(&next->pi_lock);
}
rq->stop = stop;
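
The migrate_tasks() hunk above introduces a lock-ordering dance: rq->lock is dropped so that next->pi_lock can be taken first, rq->lock is re-acquired, and the task is then re-validated because the world may have changed while the lock was down. The sketch below shows that generic "unlock, relock in the documented order, revalidate" pattern with pthread mutexes; it illustrates the pattern only, is not scheduler code, and every name in it is invented.

#include <pthread.h>
#include <stdbool.h>

/* Lock B (pi_lock) must be taken before lock A (rq->lock), but the caller
 * already holds A: drop A, take B then A, and re-check everything A was
 * protecting, since it may have changed while A was not held. */
struct task {
	pthread_mutex_t pi_lock;
	int rq_id;               /* which runqueue the task currently sits on */
	bool queued;
};

struct rq {
	pthread_mutex_t lock;
	int id;
};

/* Returns true if @t is still valid on @rq after the relock. */
static bool relock_and_revalidate(struct rq *rq, struct task *t)
{
	pthread_mutex_unlock(&rq->lock);   /* drop A                 */
	pthread_mutex_lock(&t->pi_lock);   /* take B first ...       */
	pthread_mutex_lock(&rq->lock);     /* ... then A again       */

	if (t->rq_id != rq->id || !t->queued) {
		pthread_mutex_unlock(&t->pi_lock);
		return false;              /* caller skips this task */
	}
	return true;                       /* caller acts, then drops pi_lock */
}

int main(void)
{
	struct rq rq = { .id = 0 };
	struct task t = { .rq_id = 0, .queued = true };

	pthread_mutex_init(&rq.lock, NULL);
	pthread_mutex_init(&t.pi_lock, NULL);

	pthread_mutex_lock(&rq.lock);      /* caller already holds A */
	if (relock_and_revalidate(&rq, &t)) {
		/* ... the real code would migrate the task here ... */
		pthread_mutex_unlock(&t.pi_lock);
	}
	pthread_mutex_unlock(&rq.lock);
	return 0;
}
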
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 50eb107..a9b76a4 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -97,20 +97,6 @@ EXPORT_SYMBOL_GPL(clockevent_delta2ns);
static int __clockevents_switch_state(struct clock_event_device *dev,
enum clock_event_state state)
{
- /* Transition with legacy set_mode() callback */
- if (dev->set_mode) {
- /* Legacy callback doesn't support new modes */
- if (state > CLOCK_EVT_STATE_ONESHOT)
- return -ENOSYS;
- /*
- * 'clock_event_state' and 'clock_event_mode' have 1-to-1
- * mapping until *_ONESHOT, and so a simple cast will work.
- */
- dev->set_mode((enum clock_event_mode)state, dev);
- dev->mode = (enum clock_event_mode)state;
- return 0;
- }
-
if (dev->features & CLOCK_EVT_FEAT_DUMMY)
return 0;
@@ -204,12 +190,8 @@ int clockevents_tick_resume(struct clock_event_device *dev)
{
int ret = 0;
- if (dev->set_mode) {
- dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
- dev->mode = CLOCK_EVT_MODE_RESUME;
- } else if (dev->tick_resume) {
+ if (dev->tick_resume)
ret = dev->tick_resume(dev);
- }
return ret;
}
@@ -460,26 +442,6 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);
-/* Sanity check of state transition callbacks */
-static int clockevents_sanity_check(struct clock_event_device *dev)
-{
- /* Legacy set_mode() callback */
- if (dev->set_mode) {
- /* We shouldn't be supporting new modes now */
- WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
- dev->set_state_shutdown || dev->tick_resume ||
- dev->set_state_oneshot_stopped);
-
- BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
- return 0;
- }
-
- if (dev->features & CLOCK_EVT_FEAT_DUMMY)
- return 0;
-
- return 0;
-}
-
/**
* clockevents_register_device - register a clock event device
* @dev: device to register
@@ -488,8 +450,6 @@ void clockevents_register_device(struct clock_event_device *dev)
{
unsigned long flags;
- BUG_ON(clockevents_sanity_check(dev));
-
/* Initialize state to DETACHED */
clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
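
With the legacy set_mode() path removed above, a clockevent device is expected to expose one optional callback per target state (set_state_shutdown, set_state_periodic, set_state_oneshot, set_state_oneshot_stopped, tick_resume). The userspace sketch below shows that per-state dispatch shape; the callback names mirror the fields referenced in this diff, but the dispatch body and its return conventions are assumptions, not a copy of __clockevents_switch_state().

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

enum evt_state { STATE_DETACHED, STATE_SHUTDOWN, STATE_PERIODIC, STATE_ONESHOT };

/* Stand-in for clock_event_device: one optional callback per target state. */
struct evt_dev {
	int (*set_state_shutdown)(struct evt_dev *dev);
	int (*set_state_periodic)(struct evt_dev *dev);
	int (*set_state_oneshot)(struct evt_dev *dev);
};

/* Per-state dispatch: a missing shutdown callback is tolerated, while
 * missing periodic/oneshot support is reported as -ENOSYS. */
static int switch_state(struct evt_dev *dev, enum evt_state state)
{
	switch (state) {
	case STATE_SHUTDOWN:
		return dev->set_state_shutdown ? dev->set_state_shutdown(dev) : 0;
	case STATE_PERIODIC:
		return dev->set_state_periodic ? dev->set_state_periodic(dev) : -ENOSYS;
	case STATE_ONESHOT:
		return dev->set_state_oneshot ? dev->set_state_oneshot(dev) : -ENOSYS;
	default:
		return 0;
	}
}

static int dummy_shutdown(struct evt_dev *dev)
{
	(void)dev;
	return 0;
}

int main(void)
{
	struct evt_dev dev = { .set_state_shutdown = dummy_shutdown };

	/* A device without a oneshot callback simply reports -ENOSYS. */
	printf("%d %d\n", switch_state(&dev, STATE_SHUTDOWN),
	       switch_state(&dev, STATE_ONESHOT));
	return 0;
}
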
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index d11c55b..4fcd99e 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -398,7 +398,6 @@ void tick_shutdown(unsigned int cpu)
* the set mode function!
*/
clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
- dev->mode = CLOCK_EVT_MODE_UNUSED;
clockevents_exchange_device(dev, NULL);
dev->event_handler = clockevents_handle_noop;
td->evtdev = NULL;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3319e16..7c7ec45 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -290,16 +290,17 @@ static int __init tick_nohz_full_setup(char *str)
__setup("nohz_full=", tick_nohz_full_setup);
static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+ unsigned long action,
+ void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
/*
- * If we handle the timekeeping duty for full dynticks CPUs,
- * we can't safely shutdown that CPU.
+ * The boot CPU handles housekeeping duty (unbound timers,
+ * workqueues, timekeeping, ...) on behalf of full dynticks
+ * CPUs. It must remain online when nohz full is enabled.
*/
if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
return NOTIFY_BAD;
@@ -370,6 +371,12 @@ void __init tick_nohz_init(void)
cpu_notifier(tick_nohz_cpu_down_callback, 0);
pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
cpumask_pr_args(tick_nohz_full_mask));
+
+ /*
+ * We need at least one CPU to handle housekeeping work such
+ * as timekeeping, unbound timers, workqueues, ...
+ */
+ WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
}
#endif
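
Taken together with the get_nohz_timer_target() hunk in sched/core.c earlier in this diff, the intent is that unbound timer work prefers a busy housekeeping CPU, with housekeeping_any_cpu() as the fallback, and the WARN_ON_ONCE() here guards the assumption that the housekeeping set is never empty. The sketch below restates that selection policy in plain userspace terms; it is not the kernel implementation and its names are invented.

#include <stdbool.h>
#include <stdio.h>

struct cpu_info {
	bool idle;
	bool housekeeping;   /* allowed to run unbound timers, workqueues, ... */
};

/* Prefer the current CPU if it is busy and does housekeeping, then any
 * other busy housekeeping CPU, then any housekeeping CPU at all. */
static int pick_timer_cpu(const struct cpu_info *cpus, int nr, int this_cpu)
{
	int i;

	if (!cpus[this_cpu].idle && cpus[this_cpu].housekeeping)
		return this_cpu;

	for (i = 0; i < nr; i++)
		if (!cpus[i].idle && cpus[i].housekeeping)
			return i;

	/* Fallback in the spirit of housekeeping_any_cpu(); the set being
	 * non-empty is exactly what the WARN_ON_ONCE() above asserts. */
	for (i = 0; i < nr; i++)
		if (cpus[i].housekeeping)
			return i;

	return this_cpu;
}

int main(void)
{
	struct cpu_info cpus[] = {
		{ .idle = true,  .housekeeping = true  },  /* cpu 0            */
		{ .idle = false, .housekeeping = false },  /* cpu 1: nohz_full */
		{ .idle = false, .housekeeping = true  },  /* cpu 2            */
	};

	printf("timer cpu: %d\n", pick_timer_cpu(cpus, 3, 1)); /* prints 2 */
	return 0;
}
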
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f6ee2e6..3739ac6 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1614,7 +1614,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
negative = (tick_error < 0);
/* Sort out the magnitude of the correction */
- tick_error = abs(tick_error);
+ tick_error = abs64(tick_error);
for (adj = 0; tick_error > interval; adj++)
tick_error >>= 1;
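
The one-line timekeeping.c change above matters because tick_error is a 64-bit quantity: the generic abs() of this era did not handle s64 arguments on 32-bit builds and could discard the upper bits before the magnitude loop, whereas abs64() keeps the full value. A small userspace illustration of the hazard, with the narrowing made explicit since the exact behaviour depends on the build's long width:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

/* Full-width absolute value, analogous to the kernel's abs64(). */
static int64_t abs64(int64_t v)
{
	return v < 0 ? -v : v;
}

int main(void)
{
	int64_t tick_error = -5000000000LL;   /* roughly -5 seconds in ns */

	/* Narrowing to int first, as a 32-bit abs() would, loses the
	 * upper bits and yields a meaningless magnitude. */
	printf("narrowed: %d\n", abs((int)tick_error));
	printf("abs64:    %" PRId64 "\n", abs64(tick_error));
	return 0;
}
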
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 129c960..f75e35b 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -225,7 +225,7 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
(unsigned long long) dev->min_delta_ns);
SEQ_printf(m, " mult: %u\n", dev->mult);
SEQ_printf(m, " shift: %u\n", dev->shift);
- SEQ_printf(m, " mode: %d\n", dev->mode);
+ SEQ_printf(m, " mode: %d\n", clockevent_get_state(dev));
SEQ_printf(m, " next_event: %Ld nsecs\n",
(unsigned long long) ktime_to_ns(dev->next_event));
@@ -233,40 +233,34 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
print_name_offset(m, dev->set_next_event);
SEQ_printf(m, "\n");
- if (dev->set_mode) {
- SEQ_printf(m, " set_mode: ");
- print_name_offset(m, dev->set_mode);
+ if (dev->set_state_shutdown) {
+ SEQ_printf(m, " shutdown: ");
+ print_name_offset(m, dev->set_state_shutdown);
SEQ_printf(m, "\n");
- } else {
- if (dev->set_state_shutdown) {
- SEQ_printf(m, " shutdown: ");
- print_name_offset(m, dev->set_state_shutdown);
- SEQ_printf(m, "\n");
- }
+ }
- if (dev->set_state_periodic) {
- SEQ_printf(m, " periodic: ");
- print_name_offset(m, dev->set_state_periodic);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_periodic) {
+ SEQ_printf(m, " periodic: ");
+ print_name_offset(m, dev->set_state_periodic);
+ SEQ_printf(m, "\n");
+ }
- if (dev->set_state_oneshot) {
- SEQ_printf(m, " oneshot: ");
- print_name_offset(m, dev->set_state_oneshot);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_oneshot) {
+ SEQ_printf(m, " oneshot: ");
+ print_name_offset(m, dev->set_state_oneshot);
+ SEQ_printf(m, "\n");
+ }
- if (dev->set_state_oneshot_stopped) {
- SEQ_printf(m, " oneshot stopped: ");
- print_name_offset(m, dev->set_state_oneshot_stopped);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_oneshot_stopped) {
+ SEQ_printf(m, " oneshot stopped: ");
+ print_name_offset(m, dev->set_state_oneshot_stopped);
+ SEQ_printf(m, "\n");
+ }
- if (dev->tick_resume) {
- SEQ_printf(m, " resume: ");
- print_name_offset(m, dev->tick_resume);
- SEQ_printf(m, "\n");
- }
+ if (dev->tick_resume) {
+ SEQ_printf(m, " resume: ");
+ print_name_offset(m, dev->tick_resume);
+ SEQ_printf(m, "\n");
}
SEQ_printf(m, " event_handler: ");