From 547efeadd42a3c75e41e33c0637cba100fc18289 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 24 May 2017 10:15:19 +0200
Subject: x86/mtrr: Remove get_online_cpus() from mtrr_save_state()

mtrr_save_state() is invoked from native_cpu_up() which is in the context
of a CPU hotplug operation and therefore calling get_online_cpus() is
pointless.

While this works in the current get_online_cpus() implementation it
prevents converting the hotplug locking to percpu rwsems.

Remove it.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Tested-by: Paul E. McKenney
Acked-by: Ingo Molnar
Cc: Peter Zijlstra
Cc: Steven Rostedt
Link: http://lkml.kernel.org/r/20170524081547.651378834@linutronix.de
---
 arch/x86/kernel/cpu/mtrr/main.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 2bce84d..c5bb63b 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -807,10 +807,8 @@ void mtrr_save_state(void)
 	if (!mtrr_enabled())
 		return;
 
-	get_online_cpus();
 	first_cpu = cpumask_first(cpu_online_mask);
 	smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
-	put_online_cpus();
 }
 
 void set_mtrr_aps_delayed_init(void)
--
cgit v1.1

From 04b247c2ebdd6ba1c46c7c22546229a89760b43a Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 24 May 2017 10:15:24 +0200
Subject: perf/x86/intel/cqm: Use cpuhp_setup_state_cpuslocked()

intel_cqm_init() holds get_online_cpus() while registering the hotplug
callbacks.

cpuhp_setup_state() invokes get_online_cpus() as well. This is correct, but
prevents the conversion of the hotplug locking to a percpu rwsem.

Use cpuhp_setup_state_cpuslocked() to avoid the nested call. Convert
*_online_cpus() to the new interfaces while at it.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Cc: Paul E. McKenney
Cc: Fenghua Yu
Cc: Peter Zijlstra
Cc: Steven Rostedt
Link: http://lkml.kernel.org/r/20170524081548.075604046@linutronix.de
---
 arch/x86/events/intel/cqm.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 8c00dc0..2521f77 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -1682,7 +1682,7 @@ static int __init intel_cqm_init(void)
 	 *
 	 * Also, check that the scales match on all cpus.
 	 */
-	get_online_cpus();
+	cpus_read_lock();
 	for_each_online_cpu(cpu) {
 		struct cpuinfo_x86 *c = &cpu_data(cpu);
 
@@ -1746,14 +1746,14 @@ static int __init intel_cqm_init(void)
 	 * Setup the hot cpu notifier once we are sure cqm
 	 * is enabled to avoid notifier leak.
 	 */
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
-			  "perf/x86/cqm:starting",
-			  intel_cqm_cpu_starting, NULL);
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "perf/x86/cqm:online",
-			  NULL, intel_cqm_cpu_exit);
-
+	cpuhp_setup_state_cpuslocked(CPUHP_AP_PERF_X86_CQM_STARTING,
+				     "perf/x86/cqm:starting",
+				     intel_cqm_cpu_starting, NULL);
+	cpuhp_setup_state_cpuslocked(CPUHP_AP_PERF_X86_CQM_ONLINE,
+				     "perf/x86/cqm:online",
+				     NULL, intel_cqm_cpu_exit);
 out:
-	put_online_cpus();
+	cpus_read_unlock();
 
 	if (ret) {
 		kfree(str);
--
cgit v1.1
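The cqm change above is an instance of a general pattern in this series: a
caller that already holds the hotplug lock must use the _cpuslocked variants
of the cpuhp API instead of letting cpuhp_setup_state() take the lock again.
A minimal sketch of that pattern follows; the demo_* names and the use of the
dynamic CPUHP_AP_ONLINE_DYN state are illustrative assumptions, not taken
from the patch.

    #include <linux/cpu.h>
    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    /* Hypothetical per-CPU online callback, named for illustration only. */
    static int demo_cpu_online(unsigned int cpu)
    {
            /* per-CPU setup would go here */
            return 0;
    }

    static int __init demo_init(void)
    {
            int ret;

            cpus_read_lock();               /* replaces get_online_cpus() */

            /*
             * The hotplug lock is already held, so the _cpuslocked variant
             * is required; plain cpuhp_setup_state() would acquire the lock
             * again, exactly the nesting this series is eliminating.
             */
            ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
                                               "demo:online",
                                               demo_cpu_online, NULL);

            cpus_read_unlock();             /* replaces put_online_cpus() */

            return ret < 0 ? ret : 0;
    }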
From 27d3b157fee0bad264eb745d5c547e2e0676f1a2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 24 May 2017 10:15:29 +0200
Subject: x86/perf: Drop EXPORT of perf_check_microcode

The only caller is the microcode update, which cannot be modular.

Drop the export.

Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: Borislav Petkov
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Sebastian Siewior
Cc: Steven Rostedt
Cc: Borislav Petkov
Link: http://lkml.kernel.org/r/20170524081548.515204988@linutronix.de
---
 arch/x86/events/core.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/x86')

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 580b60f..ac650d5 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2224,7 +2224,6 @@ void perf_check_microcode(void)
 	if (x86_pmu.check_microcode)
 		x86_pmu.check_microcode();
 }
-EXPORT_SYMBOL_GPL(perf_check_microcode);
 
 static struct pmu pmu = {
 	.pmu_enable		= x86_pmu_enable,
--
cgit v1.1

From 1ba143a5216fb148211160a0ecc1f8d3f92f06bb Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 24 May 2017 10:15:30 +0200
Subject: perf/x86/intel: Drop get_online_cpus() in intel_snb_check_microcode()

If intel_snb_check_microcode() is invoked via

  microcode_init -> perf_check_microcode -> intel_snb_check_microcode

then get_online_cpus() is invoked nested. This works with the current
implementation of get_online_cpus() but prevents converting it to a percpu
rwsem.

intel_snb_check_microcode() is also invoked from intel_sandybridge_quirk()
unprotected.

Drop get_online_cpus() from intel_snb_check_microcode() and add it to
intel_sandybridge_quirk() so both call sites are protected.

Convert *_online_cpus() to the new interfaces while at it.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: Borislav Petkov
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Steven Rostedt
Cc: Borislav Petkov
Link: http://lkml.kernel.org/r/20170524081548.594862191@linutronix.de
---
 arch/x86/events/intel/core.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a6d91d4..b9174aa 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3410,12 +3410,10 @@ static void intel_snb_check_microcode(void)
 	int pebs_broken = 0;
 	int cpu;
 
-	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
 			break;
 	}
-	put_online_cpus();
 
 	if (pebs_broken == x86_pmu.pebs_broken)
 		return;
@@ -3488,7 +3486,9 @@ static bool check_msr(unsigned long msr, u64 mask)
 static __init void intel_sandybridge_quirk(void)
 {
 	x86_pmu.check_microcode = intel_snb_check_microcode;
+	cpus_read_lock();
 	intel_snb_check_microcode();
+	cpus_read_unlock();
 }
 
 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
@@ -4112,13 +4112,12 @@ static __init int fixup_ht_bug(void)
 
 	lockup_detector_resume();
 
-	get_online_cpus();
+	cpus_read_lock();
 
-	for_each_online_cpu(c) {
+	for_each_online_cpu(c)
 		free_excl_cntrs(c);
-	}
 
-	put_online_cpus();
+	cpus_read_unlock();
 	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
 	return 0;
 }
--
cgit v1.1
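The intel_snb_check_microcode() change above applies the same rule from the
other direction: the helper no longer takes the hotplug lock itself, and
every call site is responsible for holding it, so the microcode_init ->
perf_check_microcode path, which already holds the lock, no longer nests it.
A rough sketch of that shape; the function names are illustrative, not from
the patch, and lockdep_assert_cpus_held() is assumed to be available from the
hotplug core changes elsewhere in this series.

    #include <linux/cpu.h>
    #include <linux/cpumask.h>

    /* Illustrative helper: callers are now responsible for the hotplug lock. */
    static void check_all_cpus(void)
    {
            int cpu;

            lockdep_assert_cpus_held();     /* documents the new requirement */

            for_each_online_cpu(cpu) {
                    /* per-CPU check would go here */
            }
    }

    /* Illustrative call site that is not already under the hotplug lock. */
    static void quirk_like_caller(void)
    {
            cpus_read_lock();
            check_all_cpus();
            cpus_read_unlock();
    }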
From f2545b2d4ce13e068897ef60ae64dffe215f4152 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 24 May 2017 10:15:35 +0200
Subject: jump_label: Reorder hotplug lock and jump_label_lock

The conversion of the hotplug locking to a percpu rwsem unearthed lock
ordering issues all over the place.

The jump_label code has two issues:

 1) Nested get_online_cpus() invocations

 2) Ordering problems vs. the cpus rwsem and the jump_label_mutex

To cure these, the following lock order has been established:

  cpus_rwsem -> jump_label_lock -> text_mutex

Even if not all architectures need protection against CPU hotplug, taking
cpus_rwsem before jump_label_lock is now mandatory in code paths which
actually modify code and therefore need text_mutex protection.

Move the get_online_cpus() invocations into the core jump label code and
establish the proper lock order where required.

Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: "David S. Miller"
Cc: Paul E. McKenney
Cc: Chris Metcalf
Cc: Peter Zijlstra
Cc: Sebastian Siewior
Cc: Steven Rostedt
Cc: Jason Baron
Cc: Ralf Baechle
Link: http://lkml.kernel.org/r/20170524081549.025830817@linutronix.de
---
 arch/x86/kernel/jump_label.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index c37bd0f..ab4f491 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -105,11 +105,9 @@ static void __jump_label_transform(struct jump_entry *entry,
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	__jump_label_transform(entry, type, NULL, 0);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 static enum {
--
cgit v1.1
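The lock order established by the changelog above reads as a simple nesting
rule. The sketch below only illustrates that nesting; it is not the actual
kernel/jump_label.c code, and on x86 text_mutex is taken inside
arch_jump_label_transform() rather than by the caller.

    #include <linux/cpu.h>
    #include <linux/jump_label.h>

    /* Illustration of the mandated nesting, innermost lock last. */
    static void example_jump_label_update(void)
    {
            cpus_read_lock();       /* 1) cpus_rwsem (hotplug lock) */
            jump_label_lock();      /* 2) jump_label_mutex */

            /*
             * 3) text_mutex is taken innermost by the code patching path,
             *    e.g. x86's arch_jump_label_transform() after this change.
             */

            jump_label_unlock();
            cpus_read_unlock();
    }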