From 1fb9d6ad2766a1dd70d167552988375049a97f21 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Fri, 5 Feb 2010 21:47:04 -0500 Subject: nmi_watchdog: Add new, generic implementation, using perf events This is a new generic nmi_watchdog implementation using the perf events infrastructure as suggested by Ingo. The implementation is simple, just create an in-kernel perf event and register an overflow handler to check for cpu lockups. I created a generic implementation that lives in kernel/ and the hardware specific part that for now lives in arch/x86. This approach has a number of advantages: - It simplifies the x86 PMU implementation in the long run, in that it removes the hardcoded low-level PMU implementation that was the NMI watchdog before. - It allows new NMI watchdog features to be added in a central place. - It allows other architectures to enable the NMI watchdog, as long as they have perf events (that provide NMIs) implemented. - It also allows for more graceful co-existence of existing perf events apps and the NMI watchdog - before these changes the relationship was exclusive. (The NMI watchdog will 'spend' a perf event when enabled. In later iterations we might be able to piggyback from an existing NMI event without having to allocate a hardware event for the NMI watchdog - turning this into a no-hardware-cost feature.) As for compatibility, we'll keep the old NMI watchdog code as well until the new one can 100% replace it on all CPUs, old and new alike. That might take some time as the NMI watchdog has been ported to many CPU models. I have done light testing to make sure the framework works correctly and it does. v2: Set the correct timeout values based on the old nmi watchdog Signed-off-by: Don Zickus Cc: Linus Torvalds Cc: Andrew Morton Cc: gorcunov@gmail.com Cc: aris@redhat.com Cc: peterz@infradead.org LKML-Reference: <1265424425-31562-3-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/nmi_watchdog.c | 191 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100644 kernel/nmi_watchdog.c (limited to 'kernel') diff --git a/kernel/nmi_watchdog.c b/kernel/nmi_watchdog.c new file mode 100644 index 0000000..36817b2 --- /dev/null +++ b/kernel/nmi_watchdog.c @@ -0,0 +1,191 @@ +/* + * Detect Hard Lockups using the NMI + * + * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. + * + * this code detects hard lockups: incidents in where on a CPU + * the kernel does not respond to anything except NMI. + * + * Note: Most of this code is borrowed heavily from softlockup.c, + * so thanks to Ingo for the initial implementation. + * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks + * to those contributors as well. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static DEFINE_PER_CPU(struct perf_event *, nmi_watchdog_ev); +static DEFINE_PER_CPU(int, nmi_watchdog_touch); +static DEFINE_PER_CPU(long, alert_counter); + +void touch_nmi_watchdog(void) +{ + __raw_get_cpu_var(nmi_watchdog_touch) = 1; + touch_softlockup_watchdog(); +} +EXPORT_SYMBOL(touch_nmi_watchdog); + +void touch_all_nmi_watchdog(void) +{ + int cpu; + + for_each_online_cpu(cpu) + per_cpu(nmi_watchdog_touch, cpu) = 1; + touch_softlockup_watchdog(); +} + +#ifdef CONFIG_SYSCTL +/* + * proc handler for /proc/sys/kernel/nmi_watchdog + */ +int proc_nmi_enabled(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +{ + int cpu; + + if (per_cpu(nmi_watchdog_ev, smp_processor_id()) == NULL) + nmi_watchdog_enabled = 0; + else + nmi_watchdog_enabled = 1; + + touch_all_nmi_watchdog(); + proc_dointvec(table, write, buffer, length, ppos); + if (nmi_watchdog_enabled) + for_each_online_cpu(cpu) + perf_event_enable(per_cpu(nmi_watchdog_ev, cpu)); + else + for_each_online_cpu(cpu) + perf_event_disable(per_cpu(nmi_watchdog_ev, cpu)); + return 0; +} + +#endif /* CONFIG_SYSCTL */ + +struct perf_event_attr wd_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, +}; + +static int panic_on_timeout; + +void wd_overflow(struct perf_event *event, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + int touched = 0; + + if (__get_cpu_var(nmi_watchdog_touch)) { + per_cpu(nmi_watchdog_touch, cpu) = 0; + touched = 1; + } + + /* check to see if the cpu is doing anything */ + if (!touched && hw_nmi_is_cpu_stuck(regs)) { + /* + * Ayiee, looks like this CPU is stuck ... + * wait a few IRQs (5 seconds) before doing the oops ... + */ + per_cpu(alert_counter,cpu) += 1; + if (per_cpu(alert_counter,cpu) == 5) { + /* + * die_nmi will return ONLY if NOTIFY_STOP happens.. 
+ */ + die_nmi("BUG: NMI Watchdog detected LOCKUP", + regs, panic_on_timeout); + } + } else { + per_cpu(alert_counter,cpu) = 0; + } + + return; +} + +/* + * Create/destroy watchdog threads as CPUs come and go: + */ +static int __cpuinit +cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) +{ + int hotcpu = (unsigned long)hcpu; + struct perf_event *event; + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + per_cpu(nmi_watchdog_touch, hotcpu) = 0; + break; + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + /* originally wanted the below chunk to be in CPU_UP_PREPARE, but caps is unpriv for non-CPU0 */ + wd_attr.sample_period = cpu_khz * 1000; + event = perf_event_create_kernel_counter(&wd_attr, hotcpu, -1, wd_overflow); + if (IS_ERR(event)) { + printk(KERN_ERR "nmi watchdog failed to create perf event on %i: %p\n", hotcpu, event); + return NOTIFY_BAD; + } + per_cpu(nmi_watchdog_ev, hotcpu) = event; + perf_event_enable(per_cpu(nmi_watchdog_ev, hotcpu)); + break; +#ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: + perf_event_disable(per_cpu(nmi_watchdog_ev, hotcpu)); + case CPU_DEAD: + case CPU_DEAD_FROZEN: + event = per_cpu(nmi_watchdog_ev, hotcpu); + per_cpu(nmi_watchdog_ev, hotcpu) = NULL; + perf_event_release_kernel(event); + break; +#endif /* CONFIG_HOTPLUG_CPU */ + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata cpu_nfb = { + .notifier_call = cpu_callback +}; + +static int __initdata nonmi_watchdog; + +static int __init nonmi_watchdog_setup(char *str) +{ + nonmi_watchdog = 1; + return 1; +} +__setup("nonmi_watchdog", nonmi_watchdog_setup); + +static int __init spawn_nmi_watchdog_task(void) +{ + void *cpu = (void *)(long)smp_processor_id(); + int err; + + if (nonmi_watchdog) + return 0; + + err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); + if (err == NOTIFY_BAD) { + BUG(); + return 1; + } + cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); + register_cpu_notifier(&cpu_nfb); + + return 0; +} +early_initcall(spawn_nmi_watchdog_task); -- cgit v1.1 From 84e478c6f1eb9c4bfa1fff2f8108e9a061b46428 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Fri, 5 Feb 2010 21:47:05 -0500 Subject: nmi_watchdog: Config option to enable new nmi_watchdog These are the bits that enable the new nmi_watchdog and safely isolate the old nmi_watchdog. Only one or the other can run, not both at the same time. Signed-off-by: Don Zickus Cc: Linus Torvalds Cc: Andrew Morton Cc: gorcunov@gmail.com Cc: aris@redhat.com Cc: peterz@infradead.org LKML-Reference: <1265424425-31562-4-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index 864ff75..8a5abe5 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -76,6 +76,7 @@ obj-$(CONFIG_AUDIT_TREE) += audit_tree.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o +obj-$(CONFIG_NMI_WATCHDOG) += nmi_watchdog.o obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ obj-$(CONFIG_SECCOMP) += seccomp.o -- cgit v1.1 From 504d7cf10ee42bb76b9556859f23d4121dee0a77 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Fri, 12 Feb 2010 17:19:19 -0500 Subject: nmi_watchdog: Compile and portability fixes The original patch was x86_64 centric. Changed the code to make it less so. ested by building and running on a powerpc. 
Signed-off-by: Don Zickus Cc: peterz@infradead.org Cc: gorcunov@gmail.com Cc: aris@redhat.com LKML-Reference: <1266013161-31197-2-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/nmi_watchdog.c | 52 ++++++++++++++++++++++++++++++++++++++++----------- kernel/sysctl.c | 15 ++++++++++++++- 2 files changed, 55 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/nmi_watchdog.c b/kernel/nmi_watchdog.c index 36817b2..73c1954 100644 --- a/kernel/nmi_watchdog.c +++ b/kernel/nmi_watchdog.c @@ -30,6 +30,8 @@ static DEFINE_PER_CPU(struct perf_event *, nmi_watchdog_ev); static DEFINE_PER_CPU(int, nmi_watchdog_touch); static DEFINE_PER_CPU(long, alert_counter); +static int panic_on_timeout; + void touch_nmi_watchdog(void) { __raw_get_cpu_var(nmi_watchdog_touch) = 1; @@ -46,19 +48,49 @@ void touch_all_nmi_watchdog(void) touch_softlockup_watchdog(); } +static int __init setup_nmi_watchdog(char *str) +{ + if (!strncmp(str, "panic", 5)) { + panic_on_timeout = 1; + str = strchr(str, ','); + if (!str) + return 1; + ++str; + } + return 1; +} +__setup("nmi_watchdog=", setup_nmi_watchdog); + #ifdef CONFIG_SYSCTL /* * proc handler for /proc/sys/kernel/nmi_watchdog */ +int nmi_watchdog_enabled; + int proc_nmi_enabled(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { int cpu; - if (per_cpu(nmi_watchdog_ev, smp_processor_id()) == NULL) + if (!write) { + struct perf_event *event; + for_each_online_cpu(cpu) { + event = per_cpu(nmi_watchdog_ev, cpu); + if (event->state > PERF_EVENT_STATE_OFF) { + nmi_watchdog_enabled = 1; + break; + } + } + proc_dointvec(table, write, buffer, length, ppos); + return 0; + } + + if (per_cpu(nmi_watchdog_ev, smp_processor_id()) == NULL) { nmi_watchdog_enabled = 0; - else - nmi_watchdog_enabled = 1; + proc_dointvec(table, write, buffer, length, ppos); + printk("NMI watchdog failed configuration, can not be enabled\n"); + return 0; + } touch_all_nmi_watchdog(); proc_dointvec(table, write, buffer, length, ppos); @@ -81,8 +113,6 @@ struct perf_event_attr wd_attr = { .disabled = 1, }; -static int panic_on_timeout; - void wd_overflow(struct perf_event *event, int nmi, struct perf_sample_data *data, struct pt_regs *regs) @@ -103,11 +133,11 @@ void wd_overflow(struct perf_event *event, int nmi, */ per_cpu(alert_counter,cpu) += 1; if (per_cpu(alert_counter,cpu) == 5) { - /* - * die_nmi will return ONLY if NOTIFY_STOP happens.. 
- */ - die_nmi("BUG: NMI Watchdog detected LOCKUP", - regs, panic_on_timeout); + if (panic_on_timeout) { + panic("NMI Watchdog detected LOCKUP on cpu %d", cpu); + } else { + WARN(1, "NMI Watchdog detected LOCKUP on cpu %d", cpu); + } } } else { per_cpu(alert_counter,cpu) = 0; @@ -133,7 +163,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_ONLINE: case CPU_ONLINE_FROZEN: /* originally wanted the below chunk to be in CPU_UP_PREPARE, but caps is unpriv for non-CPU0 */ - wd_attr.sample_period = cpu_khz * 1000; + wd_attr.sample_period = hw_nmi_get_sample_period(); event = perf_event_create_kernel_counter(&wd_attr, hotcpu, -1, wd_overflow); if (IS_ERR(event)) { printk(KERN_ERR "nmi watchdog failed to create perf event on %i: %p\n", hotcpu, event); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 8a68b24..ac72c9e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -60,6 +60,10 @@ #include #endif +#ifdef CONFIG_NMI_WATCHDOG +#include +#endif + #if defined(CONFIG_SYSCTL) @@ -692,7 +696,16 @@ static struct ctl_table kern_table[] = { .mode = 0444, .proc_handler = proc_dointvec, }, -#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) +#if defined(CONFIG_NMI_WATCHDOG) + { + .procname = "nmi_watchdog", + .data = &nmi_watchdog_enabled, + .maxlen = sizeof (int), + .mode = 0644, + .proc_handler = proc_nmi_enabled, + }, +#endif +#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) && !defined(CONFIG_NMI_WATCHDOG) { .procname = "unknown_nmi_panic", .data = &unknown_nmi_panic, -- cgit v1.1 From cf454aecb31741a0438ed1201b3dd153c7c7b19a Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Fri, 12 Feb 2010 17:19:20 -0500 Subject: nmi_watchdog: Fallback to software events when no hardware pmu detected Not all arches have a PMU or have perf_event support for their PMU. The nmi_watchdog will fail in those cases. Fallback to using software events to generate nmi_watchdog traffic with local apic interrupts. Tested on a Pentium4 and it worked as expected, excepting for detecting cpu lockups. The problem with using software events as a cpu lock up detector is the nmi_watchdog uses the logic that if local apic interrupts stop incrementing then the cpu is probably locked up. But with software events we use the local apic to trigger the nmi_watchdog callback to see if local apic interrupts are still firing, which obviously they are otherwise we wouldn't have been triggered. The algorithm to detect cpu lock ups is the same as the old nmi_watchdog. Perhaps we need to find a better way to detect lock ups? 
Signed-off-by: Don Zickus Cc: peterz@infradead.org Cc: gorcunov@gmail.com Cc: aris@redhat.com LKML-Reference: <1266013161-31197-3-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/nmi_watchdog.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/nmi_watchdog.c b/kernel/nmi_watchdog.c index 73c1954..4f23505 100644 --- a/kernel/nmi_watchdog.c +++ b/kernel/nmi_watchdog.c @@ -166,8 +166,12 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) wd_attr.sample_period = hw_nmi_get_sample_period(); event = perf_event_create_kernel_counter(&wd_attr, hotcpu, -1, wd_overflow); if (IS_ERR(event)) { - printk(KERN_ERR "nmi watchdog failed to create perf event on %i: %p\n", hotcpu, event); - return NOTIFY_BAD; + wd_attr.type = PERF_TYPE_SOFTWARE; + event = perf_event_create_kernel_counter(&wd_attr, hotcpu, -1, wd_overflow); + if (IS_ERR(event)) { + printk(KERN_ERR "nmi watchdog failed to create perf event on %i: %p\n", hotcpu, event); + return NOTIFY_BAD; + } } per_cpu(nmi_watchdog_ev, hotcpu) = event; perf_event_enable(per_cpu(nmi_watchdog_ev, hotcpu)); -- cgit v1.1 From 6081b6cd9702967889de34fe5da1f96bb96d0ab8 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Tue, 16 Feb 2010 17:04:52 -0500 Subject: nmi_watchdog: support for oprofile Re-arrange the code so that when someone disables nmi_watchdog with: echo 0 > /proc/sys/kernel/nmi_watchdog it releases the hardware reservation on the PMUs. This allows the oprofile module to grab those PMUs and do its thing. Otherwise oprofile fails to load because the hardware is reserved by the perf_events subsystem. Tested using: oprofile --vm-linux --start and watched it failed when nmi_watchdog is enabled and succeed when: oprofile --deinit && echo 0 > /proc/sys/kernel/nmi_watchdog is run. Note: this has the side quirk of having the nmi_watchdog latch onto the software events instead of hardware events if oprofile has already reserved the hardware first. User beware! 
:-) Signed-off-by: Don Zickus Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: gorcunov@gmail.com Cc: aris@redhat.com Cc: eranian@google.com LKML-Reference: <1266357892-30504-1-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/nmi_watchdog.c | 144 ++++++++++++++++++++++++++++---------------------- 1 file changed, 82 insertions(+), 62 deletions(-) (limited to 'kernel') diff --git a/kernel/nmi_watchdog.c b/kernel/nmi_watchdog.c index 4f23505..633b230 100644 --- a/kernel/nmi_watchdog.c +++ b/kernel/nmi_watchdog.c @@ -61,50 +61,6 @@ static int __init setup_nmi_watchdog(char *str) } __setup("nmi_watchdog=", setup_nmi_watchdog); -#ifdef CONFIG_SYSCTL -/* - * proc handler for /proc/sys/kernel/nmi_watchdog - */ -int nmi_watchdog_enabled; - -int proc_nmi_enabled(struct ctl_table *table, int write, - void __user *buffer, size_t *length, loff_t *ppos) -{ - int cpu; - - if (!write) { - struct perf_event *event; - for_each_online_cpu(cpu) { - event = per_cpu(nmi_watchdog_ev, cpu); - if (event->state > PERF_EVENT_STATE_OFF) { - nmi_watchdog_enabled = 1; - break; - } - } - proc_dointvec(table, write, buffer, length, ppos); - return 0; - } - - if (per_cpu(nmi_watchdog_ev, smp_processor_id()) == NULL) { - nmi_watchdog_enabled = 0; - proc_dointvec(table, write, buffer, length, ppos); - printk("NMI watchdog failed configuration, can not be enabled\n"); - return 0; - } - - touch_all_nmi_watchdog(); - proc_dointvec(table, write, buffer, length, ppos); - if (nmi_watchdog_enabled) - for_each_online_cpu(cpu) - perf_event_enable(per_cpu(nmi_watchdog_ev, cpu)); - else - for_each_online_cpu(cpu) - perf_event_disable(per_cpu(nmi_watchdog_ev, cpu)); - return 0; -} - -#endif /* CONFIG_SYSCTL */ - struct perf_event_attr wd_attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, @@ -146,6 +102,85 @@ void wd_overflow(struct perf_event *event, int nmi, return; } +static int enable_nmi_watchdog(int cpu) +{ + struct perf_event *event; + + event = per_cpu(nmi_watchdog_ev, cpu); + if (event && event->state > PERF_EVENT_STATE_OFF) + return 0; + + if (event == NULL) { + /* Try to register using hardware perf events first */ + wd_attr.sample_period = hw_nmi_get_sample_period(); + event = perf_event_create_kernel_counter(&wd_attr, cpu, -1, wd_overflow); + if (IS_ERR(event)) { + wd_attr.type = PERF_TYPE_SOFTWARE; + event = perf_event_create_kernel_counter(&wd_attr, cpu, -1, wd_overflow); + if (IS_ERR(event)) { + printk(KERN_ERR "nmi watchdog failed to create perf event on %i: %p\n", cpu, event); + return -1; + } + } + per_cpu(nmi_watchdog_ev, cpu) = event; + } + perf_event_enable(per_cpu(nmi_watchdog_ev, cpu)); + return 0; +} + +static void disable_nmi_watchdog(int cpu) +{ + struct perf_event *event; + + event = per_cpu(nmi_watchdog_ev, cpu); + if (event) { + perf_event_disable(per_cpu(nmi_watchdog_ev, cpu)); + per_cpu(nmi_watchdog_ev, cpu) = NULL; + perf_event_release_kernel(event); + } +} + +#ifdef CONFIG_SYSCTL +/* + * proc handler for /proc/sys/kernel/nmi_watchdog + */ +int nmi_watchdog_enabled; + +int proc_nmi_enabled(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +{ + int cpu; + + if (!write) { + struct perf_event *event; + for_each_online_cpu(cpu) { + event = per_cpu(nmi_watchdog_ev, cpu); + if (event && event->state > PERF_EVENT_STATE_OFF) { + nmi_watchdog_enabled = 1; + break; + } + } + proc_dointvec(table, write, buffer, length, ppos); + return 0; + } + + 
touch_all_nmi_watchdog(); + proc_dointvec(table, write, buffer, length, ppos); + if (nmi_watchdog_enabled) { + for_each_online_cpu(cpu) + if (enable_nmi_watchdog(cpu)) { + printk("NMI watchdog failed configuration, " + " can not be enabled\n"); + } + } else { + for_each_online_cpu(cpu) + disable_nmi_watchdog(cpu); + } + return 0; +} + +#endif /* CONFIG_SYSCTL */ + /* * Create/destroy watchdog threads as CPUs come and go: */ @@ -153,7 +188,6 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; - struct perf_event *event; switch (action) { case CPU_UP_PREPARE: @@ -162,29 +196,15 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: - /* originally wanted the below chunk to be in CPU_UP_PREPARE, but caps is unpriv for non-CPU0 */ - wd_attr.sample_period = hw_nmi_get_sample_period(); - event = perf_event_create_kernel_counter(&wd_attr, hotcpu, -1, wd_overflow); - if (IS_ERR(event)) { - wd_attr.type = PERF_TYPE_SOFTWARE; - event = perf_event_create_kernel_counter(&wd_attr, hotcpu, -1, wd_overflow); - if (IS_ERR(event)) { - printk(KERN_ERR "nmi watchdog failed to create perf event on %i: %p\n", hotcpu, event); - return NOTIFY_BAD; - } - } - per_cpu(nmi_watchdog_ev, hotcpu) = event; - perf_event_enable(per_cpu(nmi_watchdog_ev, hotcpu)); + if (enable_nmi_watchdog(hotcpu)) + return NOTIFY_BAD; break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: - perf_event_disable(per_cpu(nmi_watchdog_ev, hotcpu)); + disable_nmi_watchdog(hotcpu); case CPU_DEAD: case CPU_DEAD_FROZEN: - event = per_cpu(nmi_watchdog_ev, hotcpu); - per_cpu(nmi_watchdog_ev, hotcpu) = NULL; - perf_event_release_kernel(event); break; #endif /* CONFIG_HOTPLUG_CPU */ } -- cgit v1.1 From 96ca4028aca0638d0e11dcbfabc4283054072933 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Tue, 16 Feb 2010 17:02:25 -0500 Subject: nmi_watchdog: Properly configure for software events Paul Mackerras brought up a good point that when fallbacking to software events, I may have been lucky in my configuration. Modified the code to explicit provide a new configuration for software events. 
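(The "lucky" part: the earlier fallback flipped only .type to PERF_TYPE_SOFTWARE and left .config at PERF_COUNT_HW_CPU_CYCLES, which most likely kept working only because that value and PERF_COUNT_SW_CPU_CLOCK both happen to be 0 in their enums.) A minimal sketch of a dedicated software-event configuration along the lines of the wd_sw_attr added below; the period is expressed in nanoseconds because the cpu-clock event counts time rather than cycles:

/*
 * Sketch of an explicit software-event configuration, mirroring the
 * wd_sw_attr this patch introduces.  The cpu-clock event counts
 * nanoseconds, so one sample per NSEC_PER_SEC gives roughly one
 * watchdog callback per second.
 */
#include <linux/perf_event.h>
#include <linux/time.h>		/* NSEC_PER_SEC */

static struct perf_event_attr wd_sw_attr_sketch = {
	.type		= PERF_TYPE_SOFTWARE,
	.config		= PERF_COUNT_SW_CPU_CLOCK,
	.size		= sizeof(struct perf_event_attr),
	.sample_period	= NSEC_PER_SEC,
	.pinned		= 1,
	.disabled	= 1,
};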
Suggested-by: Paul Mackerras Signed-off-by: Don Zickus Cc: gorcunov@gmail.com Cc: aris@redhat.com Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <1266357745-26671-1-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/nmi_watchdog.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/nmi_watchdog.c b/kernel/nmi_watchdog.c index 633b230..3c75cbf 100644 --- a/kernel/nmi_watchdog.c +++ b/kernel/nmi_watchdog.c @@ -61,7 +61,7 @@ static int __init setup_nmi_watchdog(char *str) } __setup("nmi_watchdog=", setup_nmi_watchdog); -struct perf_event_attr wd_attr = { +struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, .size = sizeof(struct perf_event_attr), @@ -69,6 +69,14 @@ struct perf_event_attr wd_attr = { .disabled = 1, }; +struct perf_event_attr wd_sw_attr = { + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_CPU_CLOCK, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, +}; + void wd_overflow(struct perf_event *event, int nmi, struct perf_sample_data *data, struct pt_regs *regs) @@ -105,6 +113,7 @@ void wd_overflow(struct perf_event *event, int nmi, static int enable_nmi_watchdog(int cpu) { struct perf_event *event; + struct perf_event_attr *wd_attr; event = per_cpu(nmi_watchdog_ev, cpu); if (event && event->state > PERF_EVENT_STATE_OFF) @@ -112,11 +121,15 @@ static int enable_nmi_watchdog(int cpu) if (event == NULL) { /* Try to register using hardware perf events first */ - wd_attr.sample_period = hw_nmi_get_sample_period(); - event = perf_event_create_kernel_counter(&wd_attr, cpu, -1, wd_overflow); + wd_attr = &wd_hw_attr; + wd_attr->sample_period = hw_nmi_get_sample_period(); + event = perf_event_create_kernel_counter(wd_attr, cpu, -1, wd_overflow); if (IS_ERR(event)) { - wd_attr.type = PERF_TYPE_SOFTWARE; - event = perf_event_create_kernel_counter(&wd_attr, cpu, -1, wd_overflow); + /* hardware doesn't exist or not supported, fallback to software events */ + printk("nmi_watchdog: hardware not available, trying software events\n"); + wd_attr = &wd_sw_attr; + wd_attr->sample_period = NSEC_PER_SEC; + event = perf_event_create_kernel_counter(wd_attr, cpu, -1, wd_overflow); if (IS_ERR(event)) { printk(KERN_ERR "nmi watchdog failed to create perf event on %i: %p\n", cpu, event); return -1; -- cgit v1.1 From 47195d57636604ff6048b0d7aa3e4ed9643f6073 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Mon, 22 Feb 2010 18:09:03 -0500 Subject: nmi_watchdog: Clean up various small details Mostly copy/paste whitespace damage with a couple of nitpicks by the checkpatch script. Fix the struct definition as requested by Ingo too. 
Signed-off-by: Don Zickus Cc: peterz@infradead.org Cc: gorcunov@gmail.com Cc: aris@redhat.com LKML-Reference: <1266880143-24943-1-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar -- arch/x86/kernel/apic/hw_nmi.c | 14 +++++------ arch/x86/kernel/traps.c | 6 ++-- include/linux/nmi.h | 2 - kernel/nmi_watchdog.c | 51 ++++++++++++++++++++---------------------- 4 files changed, 36 insertions(+), 37 deletions(-) --- kernel/nmi_watchdog.c | 51 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/nmi_watchdog.c b/kernel/nmi_watchdog.c index 3c75cbf..0a6f57f 100644 --- a/kernel/nmi_watchdog.c +++ b/kernel/nmi_watchdog.c @@ -50,31 +50,31 @@ void touch_all_nmi_watchdog(void) static int __init setup_nmi_watchdog(char *str) { - if (!strncmp(str, "panic", 5)) { - panic_on_timeout = 1; - str = strchr(str, ','); - if (!str) - return 1; - ++str; - } - return 1; + if (!strncmp(str, "panic", 5)) { + panic_on_timeout = 1; + str = strchr(str, ','); + if (!str) + return 1; + ++str; + } + return 1; } __setup("nmi_watchdog=", setup_nmi_watchdog); struct perf_event_attr wd_hw_attr = { - .type = PERF_TYPE_HARDWARE, - .config = PERF_COUNT_HW_CPU_CYCLES, - .size = sizeof(struct perf_event_attr), - .pinned = 1, - .disabled = 1, + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, }; struct perf_event_attr wd_sw_attr = { - .type = PERF_TYPE_SOFTWARE, - .config = PERF_COUNT_SW_CPU_CLOCK, - .size = sizeof(struct perf_event_attr), - .pinned = 1, - .disabled = 1, + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_CPU_CLOCK, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, }; void wd_overflow(struct perf_event *event, int nmi, @@ -95,16 +95,15 @@ void wd_overflow(struct perf_event *event, int nmi, * Ayiee, looks like this CPU is stuck ... * wait a few IRQs (5 seconds) before doing the oops ... */ - per_cpu(alert_counter,cpu) += 1; - if (per_cpu(alert_counter,cpu) == 5) { - if (panic_on_timeout) { + per_cpu(alert_counter, cpu) += 1; + if (per_cpu(alert_counter, cpu) == 5) { + if (panic_on_timeout) panic("NMI Watchdog detected LOCKUP on cpu %d", cpu); - } else { + else WARN(1, "NMI Watchdog detected LOCKUP on cpu %d", cpu); - } } } else { - per_cpu(alert_counter,cpu) = 0; + per_cpu(alert_counter, cpu) = 0; } return; @@ -126,7 +125,7 @@ static int enable_nmi_watchdog(int cpu) event = perf_event_create_kernel_counter(wd_attr, cpu, -1, wd_overflow); if (IS_ERR(event)) { /* hardware doesn't exist or not supported, fallback to software events */ - printk("nmi_watchdog: hardware not available, trying software events\n"); + printk(KERN_INFO "nmi_watchdog: hardware not available, trying software events\n"); wd_attr = &wd_sw_attr; wd_attr->sample_period = NSEC_PER_SEC; event = perf_event_create_kernel_counter(wd_attr, cpu, -1, wd_overflow); @@ -182,7 +181,7 @@ int proc_nmi_enabled(struct ctl_table *table, int write, if (nmi_watchdog_enabled) { for_each_online_cpu(cpu) if (enable_nmi_watchdog(cpu)) { - printk("NMI watchdog failed configuration, " + printk(KERN_ERR "NMI watchdog failed configuration, " " can not be enabled\n"); } } else { -- cgit v1.1 From 5671a10e2bc7f99d9157c6044faf8be2ef302361 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Mar 2010 14:20:14 +0100 Subject: nmi_watchdog: Tell the world we're active Because I was wondering why perf stat wasn't working as expected.. 
Signed-off-by: Peter Zijlstra Cc: Don Zickus LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/nmi_watchdog.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/nmi_watchdog.c b/kernel/nmi_watchdog.c index 0a6f57f..a79d211 100644 --- a/kernel/nmi_watchdog.c +++ b/kernel/nmi_watchdog.c @@ -244,6 +244,8 @@ static int __init spawn_nmi_watchdog_task(void) if (nonmi_watchdog) return 0; + printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); + err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); if (err == NOTIFY_BAD) { BUG(); -- cgit v1.1 From 58687acba59266735adb8ccd9b5b9aa2c7cd205b Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Fri, 7 May 2010 17:11:44 -0400 Subject: lockup_detector: Combine nmi_watchdog and softlockup detector The new nmi_watchdog (which uses the perf event subsystem) is very similar in structure to the softlockup detector. Using Ingo's suggestion, I combined the two functionalities into one file: kernel/watchdog.c. Now both the nmi_watchdog (or hardlockup detector) and softlockup detector sit on top of the perf event subsystem, which is run every 60 seconds or so to see if there are any lockups. To detect hardlockups, cpus not responding to interrupts, I implemented an hrtimer that runs 5 times for every perf event overflow event. If that stops counting on a cpu, then the cpu is most likely in trouble. To detect softlockups, tasks not yielding to the scheduler, I used the previous kthread idea that now gets kicked every time the hrtimer fires. If the kthread isn't being scheduled neither is anyone else and the warning is printed to the console. I tested this on x86_64 and both the softlockup and hardlockup paths work. V2: - cleaned up the Kconfig and softlockup combination - surrounded hardlockup cases with #ifdef CONFIG_PERF_EVENTS_NMI - seperated out the softlockup case from perf event subsystem - re-arranged the enabling/disabling nmi watchdog from proc space - added cpumasks for hardlockup failure cases - removed fallback to soft events if no PMU exists for hard events V3: - comment cleanups - drop support for older softlockup code - per_cpu cleanups - completely remove software clock base hardlockup detector - use per_cpu masking on hard/soft lockup detection - #ifdef cleanups - rename config option NMI_WATCHDOG to LOCKUP_DETECTOR - documentation additions V4: - documentation fixes - convert per_cpu to __get_cpu_var - powerpc compile fixes V5: - split apart warn flags for hard and soft lockups TODO: - figure out how to make an arch-agnostic clock2cycles call (if possible) to feed into perf events as a sample period [fweisbec: merged conflict patch] Signed-off-by: Don Zickus Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Cyrill Gorcunov Cc: Eric Paris Cc: Randy Dunlap LKML-Reference: <1273266711-18706-2-git-send-email-dzickus@redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/Makefile | 3 +- kernel/sysctl.c | 21 +- kernel/watchdog.c | 592 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 608 insertions(+), 8 deletions(-) create mode 100644 kernel/watchdog.c (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index d5c3006..6adeafc 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -76,9 +76,8 @@ obj-$(CONFIG_GCOV_KERNEL) += gcov/ obj-$(CONFIG_AUDIT_TREE) += audit_tree.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KGDB) += kgdb.o -obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o -obj-$(CONFIG_NMI_WATCHDOG) += nmi_watchdog.o obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o 
+obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o diff --git a/kernel/sysctl.c b/kernel/sysctl.c index a38af43..0f9adda 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -74,7 +74,7 @@ #include #endif -#ifdef CONFIG_NMI_WATCHDOG +#ifdef CONFIG_LOCKUP_DETECTOR #include #endif @@ -686,16 +686,25 @@ static struct ctl_table kern_table[] = { .mode = 0444, .proc_handler = proc_dointvec, }, -#if defined(CONFIG_NMI_WATCHDOG) +#if defined(CONFIG_LOCKUP_DETECTOR) { - .procname = "nmi_watchdog", - .data = &nmi_watchdog_enabled, + .procname = "watchdog", + .data = &watchdog_enabled, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = proc_nmi_enabled, + .proc_handler = proc_dowatchdog_enabled, + }, + { + .procname = "watchdog_thresh", + .data = &softlockup_thresh, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dowatchdog_thresh, + .extra1 = &neg_one, + .extra2 = &sixty, }, #endif -#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) && !defined(CONFIG_NMI_WATCHDOG) +#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) && !defined(CONFIG_LOCKUP_DETECTOR) { .procname = "unknown_nmi_panic", .data = &unknown_nmi_panic, diff --git a/kernel/watchdog.c b/kernel/watchdog.c new file mode 100644 index 0000000..6b7fad8 --- /dev/null +++ b/kernel/watchdog.c @@ -0,0 +1,592 @@ +/* + * Detect hard and soft lockups on a system + * + * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. + * + * this code detects hard lockups: incidents in where on a CPU + * the kernel does not respond to anything except NMI. + * + * Note: Most of this code is borrowed heavily from softlockup.c, + * so thanks to Ingo for the initial implementation. + * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks + * to those contributors as well. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +int watchdog_enabled; +int __read_mostly softlockup_thresh = 60; + +static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); +static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); +static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer); +static DEFINE_PER_CPU(bool, softlockup_touch_sync); +static DEFINE_PER_CPU(bool, hard_watchdog_warn); +static DEFINE_PER_CPU(bool, soft_watchdog_warn); +#ifdef CONFIG_PERF_EVENTS_NMI +static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); +static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); +static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); +#endif + +static int __read_mostly did_panic; +static int __initdata no_watchdog; + + +/* boot commands */ +/* + * Should we panic when a soft-lockup or hard-lockup occurs: + */ +#ifdef CONFIG_PERF_EVENTS_NMI +static int hardlockup_panic; + +static int __init hardlockup_panic_setup(char *str) +{ + if (!strncmp(str, "panic", 5)) + hardlockup_panic = 1; + return 1; +} +__setup("nmi_watchdog=", hardlockup_panic_setup); +#endif + +unsigned int __read_mostly softlockup_panic = + CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; + +static int __init softlockup_panic_setup(char *str) +{ + softlockup_panic = simple_strtoul(str, NULL, 0); + + return 1; +} +__setup("softlockup_panic=", softlockup_panic_setup); + +static int __init nowatchdog_setup(char *str) +{ + no_watchdog = 1; + return 1; +} +__setup("nowatchdog", nowatchdog_setup); + +/* deprecated */ +static int __init nosoftlockup_setup(char *str) +{ + no_watchdog = 1; + return 1; +} +__setup("nosoftlockup", nosoftlockup_setup); +/* */ + + +/* + * Returns seconds, approximately. We don't need nanosecond + * resolution, and we don't need to waste time with a big divide when + * 2^30ns == 1.074s. + */ +static unsigned long get_timestamp(int this_cpu) +{ + return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ +} + +static unsigned long get_sample_period(void) +{ + /* + * convert softlockup_thresh from seconds to ns + * the divide by 5 is to give hrtimer 5 chances to + * increment before the hardlockup detector generates + * a warning + */ + return softlockup_thresh / 5 * NSEC_PER_SEC; +} + +/* Commands for resetting the watchdog */ +static void __touch_watchdog(void) +{ + int this_cpu = raw_smp_processor_id(); + + __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu); +} + +void touch_watchdog(void) +{ + __get_cpu_var(watchdog_touch_ts) = 0; +} +EXPORT_SYMBOL(touch_watchdog); + +void touch_all_watchdog(void) +{ + int cpu; + + /* + * this is done lockless + * do we care if a 0 races with a timestamp? 
+ * all it means is the softlock check starts one cycle later + */ + for_each_online_cpu(cpu) + per_cpu(watchdog_touch_ts, cpu) = 0; +} + +void touch_nmi_watchdog(void) +{ + touch_watchdog(); +} +EXPORT_SYMBOL(touch_nmi_watchdog); + +void touch_all_nmi_watchdog(void) +{ + touch_all_watchdog(); +} + +void touch_softlockup_watchdog(void) +{ + touch_watchdog(); +} + +void touch_all_softlockup_watchdogs(void) +{ + touch_all_watchdog(); +} + +void touch_softlockup_watchdog_sync(void) +{ + __raw_get_cpu_var(softlockup_touch_sync) = true; + __raw_get_cpu_var(watchdog_touch_ts) = 0; +} + +void softlockup_tick(void) +{ +} + +#ifdef CONFIG_PERF_EVENTS_NMI +/* watchdog detector functions */ +static int is_hardlockup(int cpu) +{ + unsigned long hrint = per_cpu(hrtimer_interrupts, cpu); + + if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint) + return 1; + + per_cpu(hrtimer_interrupts_saved, cpu) = hrint; + return 0; +} +#endif + +static int is_softlockup(unsigned long touch_ts, int cpu) +{ + unsigned long now = get_timestamp(cpu); + + /* Warn about unreasonable delays: */ + if (time_after(now, touch_ts + softlockup_thresh)) + return now - touch_ts; + + return 0; +} + +static int +watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr) +{ + did_panic = 1; + + return NOTIFY_DONE; +} + +static struct notifier_block panic_block = { + .notifier_call = watchdog_panic, +}; + +#ifdef CONFIG_PERF_EVENTS_NMI +static struct perf_event_attr wd_hw_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, +}; + +/* Callback function for perf event subsystem */ +void watchdog_overflow_callback(struct perf_event *event, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + int this_cpu = smp_processor_id(); + unsigned long touch_ts = per_cpu(watchdog_touch_ts, this_cpu); + + if (touch_ts == 0) { + __touch_watchdog(); + return; + } + + /* check for a hardlockup + * This is done by making sure our timer interrupt + * is incrementing. The timer interrupt should have + * fired multiple times before we overflow'd. If it hasn't + * then this is a good indication the cpu is stuck + */ + if (is_hardlockup(this_cpu)) { + /* only print hardlockups once */ + if (__get_cpu_var(hard_watchdog_warn) == true) + return; + + if (hardlockup_panic) + panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); + else + WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); + + __get_cpu_var(hard_watchdog_warn) = true; + return; + } + + __get_cpu_var(hard_watchdog_warn) = false; + return; +} +static void watchdog_interrupt_count(void) +{ + __get_cpu_var(hrtimer_interrupts)++; +} +#else +static inline void watchdog_interrupt_count(void) { return; } +#endif /* CONFIG_PERF_EVENTS_NMI */ + +/* watchdog kicker functions */ +static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) +{ + int this_cpu = smp_processor_id(); + unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts); + struct pt_regs *regs = get_irq_regs(); + int duration; + + /* kick the hardlockup detector */ + watchdog_interrupt_count(); + + /* kick the softlockup detector */ + wake_up_process(__get_cpu_var(softlockup_watchdog)); + + /* .. and repeat */ + hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period())); + + if (touch_ts == 0) { + if (unlikely(per_cpu(softlockup_touch_sync, this_cpu))) { + /* + * If the time stamp was touched atomically + * make sure the scheduler tick is up to date. 
+ */ + per_cpu(softlockup_touch_sync, this_cpu) = false; + sched_clock_tick(); + } + __touch_watchdog(); + return HRTIMER_RESTART; + } + + /* check for a softlockup + * This is done by making sure a high priority task is + * being scheduled. The task touches the watchdog to + * indicate it is getting cpu time. If it hasn't then + * this is a good indication some task is hogging the cpu + */ + duration = is_softlockup(touch_ts, this_cpu); + if (unlikely(duration)) { + /* only warn once */ + if (__get_cpu_var(soft_watchdog_warn) == true) + return HRTIMER_RESTART; + + printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", + this_cpu, duration, + current->comm, task_pid_nr(current)); + print_modules(); + print_irqtrace_events(current); + if (regs) + show_regs(regs); + else + dump_stack(); + + if (softlockup_panic) + panic("softlockup: hung tasks"); + __get_cpu_var(soft_watchdog_warn) = true; + } else + __get_cpu_var(soft_watchdog_warn) = false; + + return HRTIMER_RESTART; +} + + +/* + * The watchdog thread - touches the timestamp. + */ +static int watchdog(void *__bind_cpu) +{ + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, (unsigned long)__bind_cpu); + + sched_setscheduler(current, SCHED_FIFO, ¶m); + + /* initialize timestamp */ + __touch_watchdog(); + + /* kick off the timer for the hardlockup detector */ + /* done here because hrtimer_start can only pin to smp_processor_id() */ + hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()), + HRTIMER_MODE_REL_PINNED); + + set_current_state(TASK_INTERRUPTIBLE); + /* + * Run briefly once per second to reset the softlockup timestamp. + * If this gets delayed for more than 60 seconds then the + * debug-printout triggers in softlockup_tick(). + */ + while (!kthread_should_stop()) { + __touch_watchdog(); + schedule(); + + if (kthread_should_stop()) + break; + + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); + + return 0; +} + + +#ifdef CONFIG_PERF_EVENTS_NMI +static int watchdog_nmi_enable(int cpu) +{ + struct perf_event_attr *wd_attr; + struct perf_event *event = per_cpu(watchdog_ev, cpu); + + /* is it already setup and enabled? 
*/ + if (event && event->state > PERF_EVENT_STATE_OFF) + goto out; + + /* it is setup but not enabled */ + if (event != NULL) + goto out_enable; + + /* Try to register using hardware perf events */ + wd_attr = &wd_hw_attr; + wd_attr->sample_period = hw_nmi_get_sample_period(); + event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback); + if (!IS_ERR(event)) { + printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); + goto out_save; + } + + printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event); + return -1; + + /* success path */ +out_save: + per_cpu(watchdog_ev, cpu) = event; +out_enable: + perf_event_enable(per_cpu(watchdog_ev, cpu)); +out: + return 0; +} + +static void watchdog_nmi_disable(int cpu) +{ + struct perf_event *event = per_cpu(watchdog_ev, cpu); + + if (event) { + perf_event_disable(event); + per_cpu(watchdog_ev, cpu) = NULL; + + /* should be in cleanup, but blocks oprofile */ + perf_event_release_kernel(event); + } + return; +} +#else +static int watchdog_nmi_enable(int cpu) { return 0; } +static void watchdog_nmi_disable(int cpu) { return; } +#endif /* CONFIG_PERF_EVENTS_NMI */ + +/* prepare/enable/disable routines */ +static int watchdog_prepare_cpu(int cpu) +{ + struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); + + WARN_ON(per_cpu(softlockup_watchdog, cpu)); + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer->function = watchdog_timer_fn; + + return 0; +} + +static int watchdog_enable(int cpu) +{ + struct task_struct *p = per_cpu(softlockup_watchdog, cpu); + + /* enable the perf event */ + if (watchdog_nmi_enable(cpu) != 0) + return -1; + + /* create the watchdog thread */ + if (!p) { + p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); + if (IS_ERR(p)) { + printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); + return -1; + } + kthread_bind(p, cpu); + per_cpu(watchdog_touch_ts, cpu) = 0; + per_cpu(softlockup_watchdog, cpu) = p; + wake_up_process(p); + } + + return 0; +} + +static void watchdog_disable(int cpu) +{ + struct task_struct *p = per_cpu(softlockup_watchdog, cpu); + struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); + + /* + * cancel the timer first to stop incrementing the stats + * and waking up the kthread + */ + hrtimer_cancel(hrtimer); + + /* disable the perf event */ + watchdog_nmi_disable(cpu); + + /* stop the watchdog thread */ + if (p) { + per_cpu(softlockup_watchdog, cpu) = NULL; + kthread_stop(p); + } + + /* if any cpu succeeds, watchdog is considered enabled for the system */ + watchdog_enabled = 1; +} + +static void watchdog_enable_all_cpus(void) +{ + int cpu; + int result; + + for_each_online_cpu(cpu) + result += watchdog_enable(cpu); + + if (result) + printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n"); + +} + +static void watchdog_disable_all_cpus(void) +{ + int cpu; + + for_each_online_cpu(cpu) + watchdog_disable(cpu); + + /* if all watchdogs are disabled, then they are disabled for the system */ + watchdog_enabled = 0; +} + + +/* sysctl functions */ +#ifdef CONFIG_SYSCTL +/* + * proc handler for /proc/sys/kernel/nmi_watchdog + */ + +int proc_dowatchdog_enabled(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +{ + proc_dointvec(table, write, buffer, length, ppos); + + if (watchdog_enabled) + watchdog_enable_all_cpus(); + else + watchdog_disable_all_cpus(); + return 0; +} + +int proc_dowatchdog_thresh(struct ctl_table *table, int write, + void 
__user *buffer, + size_t *lenp, loff_t *ppos) +{ + return proc_dointvec_minmax(table, write, buffer, lenp, ppos); +} + +/* stub functions */ +int proc_dosoftlockup_thresh(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return proc_dowatchdog_thresh(table, write, buffer, lenp, ppos); +} +/* end of stub functions */ +#endif /* CONFIG_SYSCTL */ + + +/* + * Create/destroy watchdog threads as CPUs come and go: + */ +static int __cpuinit +cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) +{ + int hotcpu = (unsigned long)hcpu; + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + if (watchdog_prepare_cpu(hotcpu)) + return NOTIFY_BAD; + break; + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + if (watchdog_enable(hotcpu)) + return NOTIFY_BAD; + break; +#ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: + watchdog_disable(hotcpu); + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: + watchdog_disable(hotcpu); + break; +#endif /* CONFIG_HOTPLUG_CPU */ + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata cpu_nfb = { + .notifier_call = cpu_callback +}; + +static int __init spawn_watchdog_task(void) +{ + void *cpu = (void *)(long)smp_processor_id(); + int err; + + if (no_watchdog) + return 0; + + err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); + WARN_ON(err == NOTIFY_BAD); + + cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); + register_cpu_notifier(&cpu_nfb); + + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); + + return 0; +} +early_initcall(spawn_watchdog_task); -- cgit v1.1 From 332fbdbca3f7716c5620970755ae054d213bcc4e Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Fri, 7 May 2010 17:11:45 -0400 Subject: lockup_detector: Touch_softlockup cleanups and softlockup_tick removal Just some code cleanup to make touch_softlockup clearer and remove the softlockup_tick function as it is no longer needed. Also remove the /proc softlockup_thres call as it has been changed to watchdog_thres. 
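After this cleanup there is a single touch path: touch_softlockup_watchdog() zeroes the per-cpu timestamp and touch_nmi_watchdog() simply forwards to it, so existing callers keep working unchanged. A usage sketch of how long-running kernel code typically pets the watchdog so neither detector fires; the function and loop below are invented purely for illustration and are not part of this series:

/*
 * Illustrative caller only.  Long-running work that legitimately hogs
 * a CPU pets the watchdog each iteration; after this patch
 * touch_nmi_watchdog() also resets the softlockup timestamp via
 * touch_softlockup_watchdog().
 */
#include <linux/nmi.h>		/* touch_nmi_watchdog() */
#include <linux/sched.h>	/* cond_resched() */

static void slow_debug_dump(unsigned long nr_items)
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		/* ... expensive per-item work ... */

		touch_nmi_watchdog();
		cond_resched();		/* yield if we are allowed to sleep here */
	}
}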
Signed-off-by: Don Zickus Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Cyrill Gorcunov Cc: Eric Paris Cc: Randy Dunlap LKML-Reference: <1273266711-18706-3-git-send-email-dzickus@redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/sysctl.c | 9 --------- kernel/timer.c | 1 - kernel/watchdog.c | 35 +++-------------------------------- 3 files changed, 3 insertions(+), 42 deletions(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0f9adda..999bc3f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -817,15 +817,6 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &one, }, - { - .procname = "softlockup_thresh", - .data = &softlockup_thresh, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dosoftlockup_thresh, - .extra1 = &neg_one, - .extra2 = &sixty, - }, #endif #ifdef CONFIG_DETECT_HUNG_TASK { diff --git a/kernel/timer.c b/kernel/timer.c index aeb6a54..e8de5eb 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1225,7 +1225,6 @@ void run_local_timers(void) { hrtimer_run_queues(); raise_softirq(TIMER_SOFTIRQ); - softlockup_tick(); } /* diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 6b7fad8..f1541b7 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -119,13 +119,12 @@ static void __touch_watchdog(void) __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu); } -void touch_watchdog(void) +void touch_softlockup_watchdog(void) { __get_cpu_var(watchdog_touch_ts) = 0; } -EXPORT_SYMBOL(touch_watchdog); -void touch_all_watchdog(void) +void touch_all_softlockup_watchdogs(void) { int cpu; @@ -140,35 +139,16 @@ void touch_all_watchdog(void) void touch_nmi_watchdog(void) { - touch_watchdog(); + touch_softlockup_watchdog(); } EXPORT_SYMBOL(touch_nmi_watchdog); -void touch_all_nmi_watchdog(void) -{ - touch_all_watchdog(); -} - -void touch_softlockup_watchdog(void) -{ - touch_watchdog(); -} - -void touch_all_softlockup_watchdogs(void) -{ - touch_all_watchdog(); -} - void touch_softlockup_watchdog_sync(void) { __raw_get_cpu_var(softlockup_touch_sync) = true; __raw_get_cpu_var(watchdog_touch_ts) = 0; } -void softlockup_tick(void) -{ -} - #ifdef CONFIG_PERF_EVENTS_NMI /* watchdog detector functions */ static int is_hardlockup(int cpu) @@ -522,15 +502,6 @@ int proc_dowatchdog_thresh(struct ctl_table *table, int write, { return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } - -/* stub functions */ -int proc_dosoftlockup_thresh(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - return proc_dowatchdog_thresh(table, write, buffer, lenp, ppos); -} -/* end of stub functions */ #endif /* CONFIG_SYSCTL */ -- cgit v1.1 From 2508ce1845a3b256798532b2c6b7997c2dc6533b Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Fri, 7 May 2010 17:11:46 -0400 Subject: lockup_detector: Remove old softlockup code Now that is no longer compiled or used, just remove it. Also move some of the code wrapped with DETECT_SOFTLOCKUP to the LOCKUP_DETECTOR wrappers because that is the code that uses it now. 
Signed-off-by: Don Zickus Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Cyrill Gorcunov Cc: Eric Paris Cc: Randy Dunlap LKML-Reference: <1273266711-18706-4-git-send-email-dzickus@redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/softlockup.c | 293 ---------------------------------------------------- kernel/sysctl.c | 22 ++-- 2 files changed, 10 insertions(+), 305 deletions(-) delete mode 100644 kernel/softlockup.c (limited to 'kernel') diff --git a/kernel/softlockup.c b/kernel/softlockup.c deleted file mode 100644 index 4b493f6..0000000 --- a/kernel/softlockup.c +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Detect Soft Lockups - * - * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc. - * - * this code detects soft lockups: incidents in where on a CPU - * the kernel does not reschedule for 10 seconds or more. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static DEFINE_SPINLOCK(print_lock); - -static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */ -static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */ -static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); -static DEFINE_PER_CPU(bool, softlock_touch_sync); - -static int __read_mostly did_panic; -int __read_mostly softlockup_thresh = 60; - -/* - * Should we panic (and reboot, if panic_timeout= is set) when a - * soft-lockup occurs: - */ -unsigned int __read_mostly softlockup_panic = - CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; - -static int __init softlockup_panic_setup(char *str) -{ - softlockup_panic = simple_strtoul(str, NULL, 0); - - return 1; -} -__setup("softlockup_panic=", softlockup_panic_setup); - -static int -softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) -{ - did_panic = 1; - - return NOTIFY_DONE; -} - -static struct notifier_block panic_block = { - .notifier_call = softlock_panic, -}; - -/* - * Returns seconds, approximately. We don't need nanosecond - * resolution, and we don't need to waste time with a big divide when - * 2^30ns == 1.074s. - */ -static unsigned long get_timestamp(int this_cpu) -{ - return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ -} - -static void __touch_softlockup_watchdog(void) -{ - int this_cpu = raw_smp_processor_id(); - - __raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu); -} - -void touch_softlockup_watchdog(void) -{ - __raw_get_cpu_var(softlockup_touch_ts) = 0; -} -EXPORT_SYMBOL(touch_softlockup_watchdog); - -void touch_softlockup_watchdog_sync(void) -{ - __raw_get_cpu_var(softlock_touch_sync) = true; - __raw_get_cpu_var(softlockup_touch_ts) = 0; -} - -void touch_all_softlockup_watchdogs(void) -{ - int cpu; - - /* Cause each CPU to re-update its timestamp rather than complain */ - for_each_online_cpu(cpu) - per_cpu(softlockup_touch_ts, cpu) = 0; -} -EXPORT_SYMBOL(touch_all_softlockup_watchdogs); - -int proc_dosoftlockup_thresh(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - touch_all_softlockup_watchdogs(); - return proc_dointvec_minmax(table, write, buffer, lenp, ppos); -} - -/* - * This callback runs from the timer interrupt, and checks - * whether the watchdog thread has hung or not: - */ -void softlockup_tick(void) -{ - int this_cpu = smp_processor_id(); - unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu); - unsigned long print_ts; - struct pt_regs *regs = get_irq_regs(); - unsigned long now; - - /* Is detection switched off? 
*/ - if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) { - /* Be sure we don't false trigger if switched back on */ - if (touch_ts) - per_cpu(softlockup_touch_ts, this_cpu) = 0; - return; - } - - if (touch_ts == 0) { - if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) { - /* - * If the time stamp was touched atomically - * make sure the scheduler tick is up to date. - */ - per_cpu(softlock_touch_sync, this_cpu) = false; - sched_clock_tick(); - } - __touch_softlockup_watchdog(); - return; - } - - print_ts = per_cpu(softlockup_print_ts, this_cpu); - - /* report at most once a second */ - if (print_ts == touch_ts || did_panic) - return; - - /* do not print during early bootup: */ - if (unlikely(system_state != SYSTEM_RUNNING)) { - __touch_softlockup_watchdog(); - return; - } - - now = get_timestamp(this_cpu); - - /* - * Wake up the high-prio watchdog task twice per - * threshold timespan. - */ - if (time_after(now - softlockup_thresh/2, touch_ts)) - wake_up_process(per_cpu(softlockup_watchdog, this_cpu)); - - /* Warn about unreasonable delays: */ - if (time_before_eq(now - softlockup_thresh, touch_ts)) - return; - - per_cpu(softlockup_print_ts, this_cpu) = touch_ts; - - spin_lock(&print_lock); - printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n", - this_cpu, now - touch_ts, - current->comm, task_pid_nr(current)); - print_modules(); - print_irqtrace_events(current); - if (regs) - show_regs(regs); - else - dump_stack(); - spin_unlock(&print_lock); - - if (softlockup_panic) - panic("softlockup: hung tasks"); -} - -/* - * The watchdog thread - runs every second and touches the timestamp. - */ -static int watchdog(void *__bind_cpu) -{ - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - - sched_setscheduler(current, SCHED_FIFO, ¶m); - - /* initialize timestamp */ - __touch_softlockup_watchdog(); - - set_current_state(TASK_INTERRUPTIBLE); - /* - * Run briefly once per second to reset the softlockup timestamp. - * If this gets delayed for more than 60 seconds then the - * debug-printout triggers in softlockup_tick(). - */ - while (!kthread_should_stop()) { - __touch_softlockup_watchdog(); - schedule(); - - if (kthread_should_stop()) - break; - - set_current_state(TASK_INTERRUPTIBLE); - } - __set_current_state(TASK_RUNNING); - - return 0; -} - -/* - * Create/destroy watchdog threads as CPUs come and go: - */ -static int __cpuinit -cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) -{ - int hotcpu = (unsigned long)hcpu; - struct task_struct *p; - - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - BUG_ON(per_cpu(softlockup_watchdog, hotcpu)); - p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu); - if (IS_ERR(p)) { - printk(KERN_ERR "watchdog for %i failed\n", hotcpu); - return NOTIFY_BAD; - } - per_cpu(softlockup_touch_ts, hotcpu) = 0; - per_cpu(softlockup_watchdog, hotcpu) = p; - kthread_bind(p, hotcpu); - break; - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - wake_up_process(per_cpu(softlockup_watchdog, hotcpu)); - break; -#ifdef CONFIG_HOTPLUG_CPU - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - if (!per_cpu(softlockup_watchdog, hotcpu)) - break; - /* Unbind so it can run. Fall thru. 
*/ - kthread_bind(per_cpu(softlockup_watchdog, hotcpu), - cpumask_any(cpu_online_mask)); - case CPU_DEAD: - case CPU_DEAD_FROZEN: - p = per_cpu(softlockup_watchdog, hotcpu); - per_cpu(softlockup_watchdog, hotcpu) = NULL; - kthread_stop(p); - break; -#endif /* CONFIG_HOTPLUG_CPU */ - } - return NOTIFY_OK; -} - -static struct notifier_block __cpuinitdata cpu_nfb = { - .notifier_call = cpu_callback -}; - -static int __initdata nosoftlockup; - -static int __init nosoftlockup_setup(char *str) -{ - nosoftlockup = 1; - return 1; -} -__setup("nosoftlockup", nosoftlockup_setup); - -static int __init spawn_softlockup_task(void) -{ - void *cpu = (void *)(long)smp_processor_id(); - int err; - - if (nosoftlockup) - return 0; - - err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); - if (err == NOTIFY_BAD) { - BUG(); - return 1; - } - cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); - register_cpu_notifier(&cpu_nfb); - - atomic_notifier_chain_register(&panic_notifier_list, &panic_block); - - return 0; -} -early_initcall(spawn_softlockup_task); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 999bc3f..04bcd8a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -108,7 +108,7 @@ extern int blk_iopoll_enabled; #endif /* Constants used for minimum and maximum */ -#ifdef CONFIG_DETECT_SOFTLOCKUP +#ifdef CONFIG_LOCKUP_DETECTOR static int sixty = 60; static int neg_one = -1; #endif @@ -703,6 +703,15 @@ static struct ctl_table kern_table[] = { .extra1 = &neg_one, .extra2 = &sixty, }, + { + .procname = "softlockup_panic", + .data = &softlockup_panic, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) && !defined(CONFIG_LOCKUP_DETECTOR) { @@ -807,17 +816,6 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif -#ifdef CONFIG_DETECT_SOFTLOCKUP - { - .procname = "softlockup_panic", - .data = &softlockup_panic, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, - }, -#endif #ifdef CONFIG_DETECT_HUNG_TASK { .procname = "hung_task_panic", -- cgit v1.1 From f69bcf60c3f17aa367e16eef7bc6ab001ea6d58a Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Fri, 7 May 2010 17:11:47 -0400 Subject: lockup_detector: Remove nmi_watchdog.c file This file migrated to kernel/watchdog.c and then combined with kernel/softlockup.c. As a result kernel/nmi_watchdog.c is no longer needed. Just remove it. Signed-off-by: Don Zickus Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Cyrill Gorcunov Cc: Eric Paris Cc: Randy Dunlap LKML-Reference: <1273266711-18706-5-git-send-email-dzickus@redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/nmi_watchdog.c | 259 -------------------------------------------------- 1 file changed, 259 deletions(-) delete mode 100644 kernel/nmi_watchdog.c (limited to 'kernel') diff --git a/kernel/nmi_watchdog.c b/kernel/nmi_watchdog.c deleted file mode 100644 index a79d211..0000000 --- a/kernel/nmi_watchdog.c +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Detect Hard Lockups using the NMI - * - * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. - * - * this code detects hard lockups: incidents in where on a CPU - * the kernel does not respond to anything except NMI. - * - * Note: Most of this code is borrowed heavily from softlockup.c, - * so thanks to Ingo for the initial implementation. - * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks - * to those contributors as well. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -static DEFINE_PER_CPU(struct perf_event *, nmi_watchdog_ev); -static DEFINE_PER_CPU(int, nmi_watchdog_touch); -static DEFINE_PER_CPU(long, alert_counter); - -static int panic_on_timeout; - -void touch_nmi_watchdog(void) -{ - __raw_get_cpu_var(nmi_watchdog_touch) = 1; - touch_softlockup_watchdog(); -} -EXPORT_SYMBOL(touch_nmi_watchdog); - -void touch_all_nmi_watchdog(void) -{ - int cpu; - - for_each_online_cpu(cpu) - per_cpu(nmi_watchdog_touch, cpu) = 1; - touch_softlockup_watchdog(); -} - -static int __init setup_nmi_watchdog(char *str) -{ - if (!strncmp(str, "panic", 5)) { - panic_on_timeout = 1; - str = strchr(str, ','); - if (!str) - return 1; - ++str; - } - return 1; -} -__setup("nmi_watchdog=", setup_nmi_watchdog); - -struct perf_event_attr wd_hw_attr = { - .type = PERF_TYPE_HARDWARE, - .config = PERF_COUNT_HW_CPU_CYCLES, - .size = sizeof(struct perf_event_attr), - .pinned = 1, - .disabled = 1, -}; - -struct perf_event_attr wd_sw_attr = { - .type = PERF_TYPE_SOFTWARE, - .config = PERF_COUNT_SW_CPU_CLOCK, - .size = sizeof(struct perf_event_attr), - .pinned = 1, - .disabled = 1, -}; - -void wd_overflow(struct perf_event *event, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - int cpu = smp_processor_id(); - int touched = 0; - - if (__get_cpu_var(nmi_watchdog_touch)) { - per_cpu(nmi_watchdog_touch, cpu) = 0; - touched = 1; - } - - /* check to see if the cpu is doing anything */ - if (!touched && hw_nmi_is_cpu_stuck(regs)) { - /* - * Ayiee, looks like this CPU is stuck ... - * wait a few IRQs (5 seconds) before doing the oops ... - */ - per_cpu(alert_counter, cpu) += 1; - if (per_cpu(alert_counter, cpu) == 5) { - if (panic_on_timeout) - panic("NMI Watchdog detected LOCKUP on cpu %d", cpu); - else - WARN(1, "NMI Watchdog detected LOCKUP on cpu %d", cpu); - } - } else { - per_cpu(alert_counter, cpu) = 0; - } - - return; -} - -static int enable_nmi_watchdog(int cpu) -{ - struct perf_event *event; - struct perf_event_attr *wd_attr; - - event = per_cpu(nmi_watchdog_ev, cpu); - if (event && event->state > PERF_EVENT_STATE_OFF) - return 0; - - if (event == NULL) { - /* Try to register using hardware perf events first */ - wd_attr = &wd_hw_attr; - wd_attr->sample_period = hw_nmi_get_sample_period(); - event = perf_event_create_kernel_counter(wd_attr, cpu, -1, wd_overflow); - if (IS_ERR(event)) { - /* hardware doesn't exist or not supported, fallback to software events */ - printk(KERN_INFO "nmi_watchdog: hardware not available, trying software events\n"); - wd_attr = &wd_sw_attr; - wd_attr->sample_period = NSEC_PER_SEC; - event = perf_event_create_kernel_counter(wd_attr, cpu, -1, wd_overflow); - if (IS_ERR(event)) { - printk(KERN_ERR "nmi watchdog failed to create perf event on %i: %p\n", cpu, event); - return -1; - } - } - per_cpu(nmi_watchdog_ev, cpu) = event; - } - perf_event_enable(per_cpu(nmi_watchdog_ev, cpu)); - return 0; -} - -static void disable_nmi_watchdog(int cpu) -{ - struct perf_event *event; - - event = per_cpu(nmi_watchdog_ev, cpu); - if (event) { - perf_event_disable(per_cpu(nmi_watchdog_ev, cpu)); - per_cpu(nmi_watchdog_ev, cpu) = NULL; - perf_event_release_kernel(event); - } -} - -#ifdef CONFIG_SYSCTL -/* - * proc handler for /proc/sys/kernel/nmi_watchdog - */ -int nmi_watchdog_enabled; - -int proc_nmi_enabled(struct ctl_table *table, int write, - void __user *buffer, size_t *length, loff_t *ppos) -{ - int cpu; - - if (!write) 
{ - struct perf_event *event; - for_each_online_cpu(cpu) { - event = per_cpu(nmi_watchdog_ev, cpu); - if (event && event->state > PERF_EVENT_STATE_OFF) { - nmi_watchdog_enabled = 1; - break; - } - } - proc_dointvec(table, write, buffer, length, ppos); - return 0; - } - - touch_all_nmi_watchdog(); - proc_dointvec(table, write, buffer, length, ppos); - if (nmi_watchdog_enabled) { - for_each_online_cpu(cpu) - if (enable_nmi_watchdog(cpu)) { - printk(KERN_ERR "NMI watchdog failed configuration, " - " can not be enabled\n"); - } - } else { - for_each_online_cpu(cpu) - disable_nmi_watchdog(cpu); - } - return 0; -} - -#endif /* CONFIG_SYSCTL */ - -/* - * Create/destroy watchdog threads as CPUs come and go: - */ -static int __cpuinit -cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) -{ - int hotcpu = (unsigned long)hcpu; - - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - per_cpu(nmi_watchdog_touch, hotcpu) = 0; - break; - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - if (enable_nmi_watchdog(hotcpu)) - return NOTIFY_BAD; - break; -#ifdef CONFIG_HOTPLUG_CPU - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - disable_nmi_watchdog(hotcpu); - case CPU_DEAD: - case CPU_DEAD_FROZEN: - break; -#endif /* CONFIG_HOTPLUG_CPU */ - } - return NOTIFY_OK; -} - -static struct notifier_block __cpuinitdata cpu_nfb = { - .notifier_call = cpu_callback -}; - -static int __initdata nonmi_watchdog; - -static int __init nonmi_watchdog_setup(char *str) -{ - nonmi_watchdog = 1; - return 1; -} -__setup("nonmi_watchdog", nonmi_watchdog_setup); - -static int __init spawn_nmi_watchdog_task(void) -{ - void *cpu = (void *)(long)smp_processor_id(); - int err; - - if (nonmi_watchdog) - return 0; - - printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); - - err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); - if (err == NOTIFY_BAD) { - BUG(); - return 1; - } - cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); - register_cpu_notifier(&cpu_nfb); - - return 0; -} -early_initcall(spawn_nmi_watchdog_task); -- cgit v1.1 From d7c547335fa6b0090fa09c46ea0e965ac273a27e Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Fri, 7 May 2010 17:11:51 -0400 Subject: lockup_detector: Separate touch_nmi_watchdog code path from touch_watchdog When I combined the nmi_watchdog (hardlockup) and softlockup code, I also combined the paths the touch_watchdog and touch_nmi_watchdog took. This may not be the best idea as pointed out by Frederic W., that the touch_watchdog case probably should not reset the hardlockup count. Therefore the patch below falls back to the previous idea of keeping the touch_nmi_watchdog a superset of the touch_watchdog case. 
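For illustration only, a minimal user-space model of that superset relationship; every name below is an invented stand-in for the per-CPU state in kernel/watchdog.c, not kernel code:

  #include <stdbool.h>
  #include <stdio.h>

  static bool nmi_touch;                  /* models watchdog_nmi_touch */
  static unsigned long soft_touch_ts = 1; /* models watchdog_touch_ts; 0 = "touched" */

  /* touch_softlockup_watchdog() only resets the soft-lockup timestamp. */
  static void model_touch_watchdog(void)
  {
          soft_touch_ts = 0;
  }

  /* touch_nmi_watchdog() is the superset: it also skips the hard-lockup check. */
  static void model_touch_nmi_watchdog(void)
  {
          nmi_touch = true;
          model_touch_watchdog();
  }

  int main(void)
  {
          model_touch_watchdog();
          printf("after touch_watchdog:     nmi_touch=%d soft_ts=%lu\n",
                 nmi_touch, soft_touch_ts);
          model_touch_nmi_watchdog();
          printf("after touch_nmi_watchdog: nmi_touch=%d soft_ts=%lu\n",
                 nmi_touch, soft_touch_ts);
          return 0;
  }

Running the sketch shows that the plain touch leaves the hard-lockup flag alone, while the NMI touch sets it and resets the soft-lockup timestamp as well, which is the behaviour the hunk below restores.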
Signed-off-by: Don Zickus Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Cyrill Gorcunov Cc: Eric Paris Cc: Randy Dunlap LKML-Reference: <1273266711-18706-9-git-send-email-dzickus@redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/watchdog.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index f1541b7..57b8e2c 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -31,6 +31,7 @@ int watchdog_enabled; int __read_mostly softlockup_thresh = 60; static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); +static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer); static DEFINE_PER_CPU(bool, softlockup_touch_sync); @@ -139,6 +140,7 @@ void touch_all_softlockup_watchdogs(void) void touch_nmi_watchdog(void) { + __get_cpu_var(watchdog_nmi_touch) = true; touch_softlockup_watchdog(); } EXPORT_SYMBOL(touch_nmi_watchdog); @@ -201,10 +203,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi, struct pt_regs *regs) { int this_cpu = smp_processor_id(); - unsigned long touch_ts = per_cpu(watchdog_touch_ts, this_cpu); - if (touch_ts == 0) { - __touch_watchdog(); + if (__get_cpu_var(watchdog_nmi_touch) == true) { + __get_cpu_var(watchdog_nmi_touch) = false; return; } -- cgit v1.1 From 0167c781907fcdc3e1f144ef5ce31d402c91eb94 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 13 May 2010 08:53:33 +0200 Subject: watchdog: Export touch_softlockup_watchdog There are modules that rely on it: ERROR: "touch_softlockup_watchdog" [drivers/video/nvidia/nvidiafb.ko] undefined! Cc: Frederic Weisbecker Cc: Don Zickus Cc: Peter Zijlstra Cc: Cyrill Gorcunov LKML-Reference: <1273713674-8434-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 57b8e2c..be5e74e 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -124,6 +124,7 @@ void touch_softlockup_watchdog(void) { __get_cpu_var(watchdog_touch_ts) = 0; } +EXPORT_SYMBOL(touch_softlockup_watchdog); void touch_all_softlockup_watchdogs(void) { -- cgit v1.1 From 23637d477c1f53acbb176a02c241d60a25888fae Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 15 May 2010 23:15:20 +0200 Subject: lockup_detector: Introduce CONFIG_HARDLOCKUP_DETECTOR This new config is deemed to simplify even more the lockup detector dependencies and can make it easier to bring a smooth sorting between archs that support the new generic lockup detector and those that still have their own, especially for those that are in the middle of this migration. Instead of checking whether we have CONFIG_LOCKUP_DETECTOR + CONFIG_PERF_EVENTS_NMI each time an arch wants to know if it needs to build its own lockup detector, take a shortcut with this new config. It is enabled only if the hardlockup detection part of the whole lockup detector is on. 
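As a rough sketch of what the shortcut buys (the Kconfig wiring itself is not shown in this commit's hunks), arch code can now test one preprocessor symbol instead of spelling out the pair; the standalone example below only models that check, with the symbol defined by hand:

  #include <stdio.h>

  /* Pretend the build system selected the combined symbol for this sketch. */
  #define CONFIG_HARDLOCKUP_DETECTOR 1

  int main(void)
  {
  #if defined(CONFIG_LOCKUP_DETECTOR) && defined(CONFIG_PERF_EVENTS_NMI)
          puts("old check: two symbols had to be tested together");
  #endif
  #ifdef CONFIG_HARDLOCKUP_DETECTOR
          puts("new check: a single CONFIG_HARDLOCKUP_DETECTOR test is enough");
  #endif
          return 0;
  }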
Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Don Zickus Cc: Cyrill Gorcunov --- kernel/watchdog.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index be5e74e..83fb631 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -37,7 +37,7 @@ static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer); static DEFINE_PER_CPU(bool, softlockup_touch_sync); static DEFINE_PER_CPU(bool, hard_watchdog_warn); static DEFINE_PER_CPU(bool, soft_watchdog_warn); -#ifdef CONFIG_PERF_EVENTS_NMI +#ifdef CONFIG_HARDLOCKUP_DETECTOR static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); @@ -51,7 +51,7 @@ static int __initdata no_watchdog; /* * Should we panic when a soft-lockup or hard-lockup occurs: */ -#ifdef CONFIG_PERF_EVENTS_NMI +#ifdef CONFIG_HARDLOCKUP_DETECTOR static int hardlockup_panic; static int __init hardlockup_panic_setup(char *str) @@ -152,7 +152,7 @@ void touch_softlockup_watchdog_sync(void) __raw_get_cpu_var(watchdog_touch_ts) = 0; } -#ifdef CONFIG_PERF_EVENTS_NMI +#ifdef CONFIG_HARDLOCKUP_DETECTOR /* watchdog detector functions */ static int is_hardlockup(int cpu) { @@ -189,7 +189,7 @@ static struct notifier_block panic_block = { .notifier_call = watchdog_panic, }; -#ifdef CONFIG_PERF_EVENTS_NMI +#ifdef CONFIG_HARDLOCKUP_DETECTOR static struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, @@ -239,7 +239,7 @@ static void watchdog_interrupt_count(void) } #else static inline void watchdog_interrupt_count(void) { return; } -#endif /* CONFIG_PERF_EVENTS_NMI */ +#endif /* CONFIG_HARDLOCKUP_DETECTOR */ /* watchdog kicker functions */ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) @@ -342,7 +342,7 @@ static int watchdog(void *__bind_cpu) } -#ifdef CONFIG_PERF_EVENTS_NMI +#ifdef CONFIG_HARDLOCKUP_DETECTOR static int watchdog_nmi_enable(int cpu) { struct perf_event_attr *wd_attr; @@ -393,7 +393,7 @@ static void watchdog_nmi_disable(int cpu) #else static int watchdog_nmi_enable(int cpu) { return 0; } static void watchdog_nmi_disable(int cpu) { return; } -#endif /* CONFIG_PERF_EVENTS_NMI */ +#endif /* CONFIG_HARDLOCKUP_DETECTOR */ /* prepare/enable/disable routines */ static int watchdog_prepare_cpu(int cpu) -- cgit v1.1 From cafcd80d216bc2136b8edbb794327e495792c666 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Fri, 14 May 2010 11:11:21 -0400 Subject: lockup_detector: Cross arch compile fixes Combining the softlockup and hardlockup code causes watchdog.c to build even without the hardlockup detection support. So if an arch, that has the previous and the new nmi watchdog implementations cohabiting, wants to know if the generic one is in use, CONFIG_LOCKUP_DETECTOR is not a reliable check. We need to use CONFIG_HARDLOCKUP_DETECTOR instead. 
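A sketch of why the guard matters, assuming the sparc case quoted in the error below; the file split shown here is illustrative, not the actual layout:

  /* kernel/watchdog.c, sketched: only provide the symbol when the generic
   * hard-lockup detector is really built, as the hunk below does. */
  #ifdef CONFIG_HARDLOCKUP_DETECTOR
  void touch_nmi_watchdog(void)
  {
          /* generic implementation */
  }
  #endif

  /* arch side, sketched: an architecture that still carries its own NMI
   * watchdog keeps its definition; with the guard above there is never a
   * second copy of touch_nmi_watchdog(), so the "multiple definition"
   * link error goes away. */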
Fixes: kernel/built-in.o: In function `touch_nmi_watchdog': (.text+0x449bc): multiple definition of `touch_nmi_watchdog' arch/sparc/kernel/built-in.o:(.text+0x11b28): first defined here Signed-off-by: Don Zickus Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Don Zickus Cc: Cyrill Gorcunov LKML-Reference: <20100514151121.GR15159@redhat.com> [ use CONFIG_HARDLOCKUP_DETECTOR instead of CONFIG_PERF_EVENTS_NMI] Signed-off-by: Frederic Weisbecker --- kernel/watchdog.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 83fb631..e53622c 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -31,13 +31,13 @@ int watchdog_enabled; int __read_mostly softlockup_thresh = 60; static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); -static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer); static DEFINE_PER_CPU(bool, softlockup_touch_sync); -static DEFINE_PER_CPU(bool, hard_watchdog_warn); static DEFINE_PER_CPU(bool, soft_watchdog_warn); #ifdef CONFIG_HARDLOCKUP_DETECTOR +static DEFINE_PER_CPU(bool, hard_watchdog_warn); +static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); @@ -139,6 +139,7 @@ void touch_all_softlockup_watchdogs(void) per_cpu(watchdog_touch_ts, cpu) = 0; } +#ifdef CONFIG_HARDLOCKUP_DETECTOR void touch_nmi_watchdog(void) { __get_cpu_var(watchdog_nmi_touch) = true; @@ -146,6 +147,8 @@ void touch_nmi_watchdog(void) } EXPORT_SYMBOL(touch_nmi_watchdog); +#endif + void touch_softlockup_watchdog_sync(void) { __raw_get_cpu_var(softlockup_touch_sync) = true; -- cgit v1.1 From 26e09c6eee14f4827b55137ba0eedc4e77cd50ab Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Mon, 17 May 2010 18:06:04 -0400 Subject: lockup_detector: Convert per_cpu to __get_cpu_var for readability Just a bunch of conversions as suggested by Frederic W. __get_cpu_var() provides preemption disabled checks. Plus it gives more readability as it makes it obvious we are dealing locally now with these vars. 
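A user-space analogue of the readability argument; per_cpu() and __get_cpu_var() are the real kernel accessors, everything else below is invented for the example:

  #include <stdio.h>

  #define NR_IDS 4

  /* per_cpu(var, cpu) analogue: storage reached through an explicit id. */
  static unsigned long counters[NR_IDS];

  /* __get_cpu_var(var) analogue: storage that is implicitly "ours". */
  static _Thread_local unsigned long my_counter;

  static void bump_indexed(int id) { counters[id]++; }
  static void bump_local(void)     { my_counter++; }

  int main(void)
  {
          bump_indexed(0); /* the caller has to fetch and pass the right id */
          bump_local();    /* obviously local; nothing to look up or pass   */
          printf("indexed=%lu local=%lu\n", counters[0], my_counter);
          return 0;
  }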
Signed-off-by: Don Zickus Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Don Zickus Cc: Cyrill Gorcunov LKML-Reference: <1274133966-18415-2-git-send-email-dzickus@redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/watchdog.c | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index e53622c..91b0b26 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -115,7 +115,7 @@ static unsigned long get_sample_period(void) /* Commands for resetting the watchdog */ static void __touch_watchdog(void) { - int this_cpu = raw_smp_processor_id(); + int this_cpu = smp_processor_id(); __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu); } @@ -157,21 +157,21 @@ void touch_softlockup_watchdog_sync(void) #ifdef CONFIG_HARDLOCKUP_DETECTOR /* watchdog detector functions */ -static int is_hardlockup(int cpu) +static int is_hardlockup(void) { - unsigned long hrint = per_cpu(hrtimer_interrupts, cpu); + unsigned long hrint = __get_cpu_var(hrtimer_interrupts); - if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint) + if (__get_cpu_var(hrtimer_interrupts_saved) == hrint) return 1; - per_cpu(hrtimer_interrupts_saved, cpu) = hrint; + __get_cpu_var(hrtimer_interrupts_saved) = hrint; return 0; } #endif -static int is_softlockup(unsigned long touch_ts, int cpu) +static int is_softlockup(unsigned long touch_ts) { - unsigned long now = get_timestamp(cpu); + unsigned long now = get_timestamp(smp_processor_id()); /* Warn about unreasonable delays: */ if (time_after(now, touch_ts + softlockup_thresh)) @@ -206,8 +206,6 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { - int this_cpu = smp_processor_id(); - if (__get_cpu_var(watchdog_nmi_touch) == true) { __get_cpu_var(watchdog_nmi_touch) = false; return; @@ -219,7 +217,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi, * fired multiple times before we overflow'd. If it hasn't * then this is a good indication the cpu is stuck */ - if (is_hardlockup(this_cpu)) { + if (is_hardlockup()) { + int this_cpu = smp_processor_id(); + /* only print hardlockups once */ if (__get_cpu_var(hard_watchdog_warn) == true) return; @@ -247,7 +247,6 @@ static inline void watchdog_interrupt_count(void) { return; } /* watchdog kicker functions */ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) { - int this_cpu = smp_processor_id(); unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts); struct pt_regs *regs = get_irq_regs(); int duration; @@ -262,12 +261,12 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period())); if (touch_ts == 0) { - if (unlikely(per_cpu(softlockup_touch_sync, this_cpu))) { + if (unlikely(__get_cpu_var(softlockup_touch_sync))) { /* * If the time stamp was touched atomically * make sure the scheduler tick is up to date. */ - per_cpu(softlockup_touch_sync, this_cpu) = false; + __get_cpu_var(softlockup_touch_sync) = false; sched_clock_tick(); } __touch_watchdog(); @@ -280,14 +279,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) * indicate it is getting cpu time. 
If it hasn't then * this is a good indication some task is hogging the cpu */ - duration = is_softlockup(touch_ts, this_cpu); + duration = is_softlockup(touch_ts); if (unlikely(duration)) { /* only warn once */ if (__get_cpu_var(soft_watchdog_warn) == true) return HRTIMER_RESTART; printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", - this_cpu, duration, + smp_processor_id(), duration, current->comm, task_pid_nr(current)); print_modules(); print_irqtrace_events(current); @@ -309,10 +308,10 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) /* * The watchdog thread - touches the timestamp. */ -static int watchdog(void *__bind_cpu) +static int watchdog(void *unused) { struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, (unsigned long)__bind_cpu); + struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); sched_setscheduler(current, SCHED_FIFO, ¶m); @@ -328,7 +327,7 @@ static int watchdog(void *__bind_cpu) /* * Run briefly once per second to reset the softlockup timestamp. * If this gets delayed for more than 60 seconds then the - * debug-printout triggers in softlockup_tick(). + * debug-printout triggers in watchdog_timer_fn(). */ while (!kthread_should_stop()) { __touch_watchdog(); -- cgit v1.1 From 749d811f10a410b64cf4c674c498ec04316ec373 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 3 Jun 2010 20:19:28 +1000 Subject: padata: add parenthesis in MAX_SEQ_NR macro MAX_SEQ_NR is used in padata_alloc_pd() like this: pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; It needs parenthesis or the divide by num_cpus takes precedence over the subtraction. Signed-off-by: Dan Carpenter Acked-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index b1c9857..ff8de1b 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -28,7 +28,7 @@ #include #include -#define MAX_SEQ_NR INT_MAX - NR_CPUS +#define MAX_SEQ_NR (INT_MAX - NR_CPUS) #define MAX_OBJ_NUM 1000 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) -- cgit v1.1 From d1f74e20b5b064a130cd0743a256c2d3cfe84010 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 2 Jun 2010 21:52:29 -0400 Subject: tracing/sched: Make preempt_schedule() notrace The function tracer code uses ftrace_preempt_disable() to disable preemption instead of normal preempt_disable(). But there's a slight race condition that may cause it to lose a preemption check. This was made to keep the function tracer from recursing on itself by disabling preemption then having the enable call the function tracer again, causing infinite recursion. The bug was assumed to happen if the call was just in schedule, but this is incorrect. The bug is caused by preempt_schedule() which is called by preempt_enable(). The calling of preempt_enable() when NEED_RESCHED was set would call preempt_schedule() which would call the function tracer again. By making the preempt_schedule() and add_preempt_count() notrace then this will prevent the inifinite recursion. This is because the add_preempt_count() would stop the preempt_enable() in the function tracer from calling preempt_schedule() again. The sub_preempt_count() is also made notrace just to keep it symmetric. 
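A hedged user-space model of the recursion being broken; tracer() and the two preempt_schedule variants below are invented stand-ins, not the real kernel symbols:

  #include <stdio.h>

  static int tracer_depth;

  static void tracer(void);

  /* Traced entry point: instrumented, so it calls back into the tracer. */
  static void model_preempt_schedule_traced(void)
  {
          puts("preempt_schedule (traced): tracer re-entered");
          tracer();
          /* ... schedule() would run here ... */
  }

  /* notrace entry point: not instrumented, so the loop never starts. */
  static void model_preempt_schedule_notrace(void)
  {
          puts("preempt_schedule (notrace): no tracer re-entry");
          /* ... schedule() would run here ... */
  }

  /* Stand-in for the function tracer callback. */
  static void tracer(void)
  {
          if (tracer_depth)       /* guard so this demo terminates */
                  return;
          tracer_depth++;
          /* The callback's own preempt_enable()-like path re-enters
           * preempt_schedule(); if that entry point is traced, we loop. */
          model_preempt_schedule_traced();
          tracer_depth--;
  }

  int main(void)
  {
          tracer();                          /* shows the recursion */
          model_preempt_schedule_notrace();  /* shows the fixed path */
          return 0;
  }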
Signed-off-by: Steven Rostedt --- kernel/sched.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 15b93f6..cd6787e 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3730,7 +3730,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) * off of preempt_enable. Kernel preemptions off return from interrupt * occur there and call schedule directly. */ -asmlinkage void __sched preempt_schedule(void) +asmlinkage void __sched notrace preempt_schedule(void) { struct thread_info *ti = current_thread_info(); @@ -3742,9 +3742,9 @@ asmlinkage void __sched preempt_schedule(void) return; do { - add_preempt_count(PREEMPT_ACTIVE); + add_preempt_count_notrace(PREEMPT_ACTIVE); schedule(); - sub_preempt_count(PREEMPT_ACTIVE); + sub_preempt_count_notrace(PREEMPT_ACTIVE); /* * Check again in case we missed a preemption opportunity -- cgit v1.1 From 5168ae50a66e3ff7184c2b16d661bd6d70367e50 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 3 Jun 2010 09:36:50 -0400 Subject: tracing: Remove ftrace_preempt_disable/enable The ftrace_preempt_disable/enable functions were to address a recursive race caused by the function tracer. The function tracer traces all functions which makes it easily susceptible to recursion. One area was preempt_enable(). This would call the scheduler and the schedulre would call the function tracer and loop. (So was it thought). The ftrace_preempt_disable/enable was made to protect against recursion inside the scheduler by storing the NEED_RESCHED flag. If it was set before the ftrace_preempt_disable() it would not call schedule on ftrace_preempt_enable(), thinking that if it was set before then it would have already scheduled unless it was already in the scheduler. This worked fine except in the case of SMP, where another task would set the NEED_RESCHED flag for a task on another CPU, and then kick off an IPI to trigger it. This could cause the NEED_RESCHED to be saved at ftrace_preempt_disable() but the IPI to arrive in the the preempt disabled section. The ftrace_preempt_enable() would not call the scheduler because the flag was already set before entring the section. This bug would cause a missed preemption check and cause lower latencies. Investigating further, I found that the recusion caused by the function tracer was not due to schedule(), but due to preempt_schedule(). Now that preempt_schedule is completely annotated with notrace, the recusion no longer is an issue. Reported-by: Thomas Gleixner Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 5 ++-- kernel/trace/ring_buffer.c | 38 +++++++------------------------ kernel/trace/trace.c | 5 ++-- kernel/trace/trace.h | 48 --------------------------------------- kernel/trace/trace_clock.c | 5 ++-- kernel/trace/trace_events.c | 5 ++-- kernel/trace/trace_functions.c | 6 ++--- kernel/trace/trace_sched_wakeup.c | 5 ++-- kernel/trace/trace_stack.c | 6 ++--- 9 files changed, 24 insertions(+), 99 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6d2cb14..0d88ce9 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1883,7 +1883,6 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip) struct hlist_head *hhd; struct hlist_node *n; unsigned long key; - int resched; key = hash_long(ip, FTRACE_HASH_BITS); @@ -1897,12 +1896,12 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip) * period. 
This syncs the hash iteration and freeing of items * on the hash. rcu_read_lock is too dangerous here. */ - resched = ftrace_preempt_disable(); + preempt_disable_notrace(); hlist_for_each_entry_rcu(entry, n, hhd, node) { if (entry->ip == ip) entry->ops->func(ip, parent_ip, &entry->data); } - ftrace_preempt_enable(resched); + preempt_enable_notrace(); } static struct ftrace_ops trace_probe_ops __read_mostly = diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 7f6059c..c3d3cd9 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2234,8 +2234,6 @@ static void trace_recursive_unlock(void) #endif -static DEFINE_PER_CPU(int, rb_need_resched); - /** * ring_buffer_lock_reserve - reserve a part of the buffer * @buffer: the ring buffer to reserve from @@ -2256,13 +2254,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) { struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_event *event; - int cpu, resched; + int cpu; if (ring_buffer_flags != RB_BUFFERS_ON) return NULL; /* If we are tracing schedule, we don't want to recurse */ - resched = ftrace_preempt_disable(); + preempt_disable_notrace(); if (atomic_read(&buffer->record_disabled)) goto out_nocheck; @@ -2287,21 +2285,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) if (!event) goto out; - /* - * Need to store resched state on this cpu. - * Only the first needs to. - */ - - if (preempt_count() == 1) - per_cpu(rb_need_resched, cpu) = resched; - return event; out: trace_recursive_unlock(); out_nocheck: - ftrace_preempt_enable(resched); + preempt_enable_notrace(); return NULL; } EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); @@ -2347,13 +2337,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, trace_recursive_unlock(); - /* - * Only the last preempt count needs to restore preemption. - */ - if (preempt_count() == 1) - ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); - else - preempt_enable_no_resched_notrace(); + preempt_enable_notrace(); return 0; } @@ -2461,13 +2445,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer, trace_recursive_unlock(); - /* - * Only the last preempt count needs to restore preemption. 
- */ - if (preempt_count() == 1) - ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); - else - preempt_enable_no_resched_notrace(); + preempt_enable_notrace(); } EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); @@ -2493,12 +2471,12 @@ int ring_buffer_write(struct ring_buffer *buffer, struct ring_buffer_event *event; void *body; int ret = -EBUSY; - int cpu, resched; + int cpu; if (ring_buffer_flags != RB_BUFFERS_ON) return -EBUSY; - resched = ftrace_preempt_disable(); + preempt_disable_notrace(); if (atomic_read(&buffer->record_disabled)) goto out; @@ -2528,7 +2506,7 @@ int ring_buffer_write(struct ring_buffer *buffer, ret = 0; out: - ftrace_preempt_enable(resched); + preempt_enable_notrace(); return ret; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 55e4851..3572714 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1404,7 +1404,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) struct bprint_entry *entry; unsigned long flags; int disable; - int resched; int cpu, len = 0, size, pc; if (unlikely(tracing_selftest_running || tracing_disabled)) @@ -1414,7 +1413,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) pause_graph_tracing(); pc = preempt_count(); - resched = ftrace_preempt_disable(); + preempt_disable_notrace(); cpu = raw_smp_processor_id(); data = tr->data[cpu]; @@ -1452,7 +1451,7 @@ out_unlock: out: atomic_dec_return(&data->disabled); - ftrace_preempt_enable(resched); + preempt_enable_notrace(); unpause_graph_tracing(); return len; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 2cd9639..6c45e55 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -628,54 +628,6 @@ enum trace_iterator_flags { extern struct tracer nop_trace; -/** - * ftrace_preempt_disable - disable preemption scheduler safe - * - * When tracing can happen inside the scheduler, there exists - * cases that the tracing might happen before the need_resched - * flag is checked. If this happens and the tracer calls - * preempt_enable (after a disable), a schedule might take place - * causing an infinite recursion. - * - * To prevent this, we read the need_resched flag before - * disabling preemption. When we want to enable preemption we - * check the flag, if it is set, then we call preempt_enable_no_resched. - * Otherwise, we call preempt_enable. - * - * The rational for doing the above is that if need_resched is set - * and we have yet to reschedule, we are either in an atomic location - * (where we do not need to check for scheduling) or we are inside - * the scheduler and do not want to resched. - */ -static inline int ftrace_preempt_disable(void) -{ - int resched; - - resched = need_resched(); - preempt_disable_notrace(); - - return resched; -} - -/** - * ftrace_preempt_enable - enable preemption scheduler safe - * @resched: the return value from ftrace_preempt_disable - * - * This is a scheduler safe way to enable preemption and not miss - * any preemption checks. The disabled saved the state of preemption. - * If resched is set, then we are either inside an atomic or - * are inside the scheduler (we would have already scheduled - * otherwise). In this case, we do not want to call normal - * preempt_enable, but preempt_enable_no_resched instead. 
- */ -static inline void ftrace_preempt_enable(int resched) -{ - if (resched) - preempt_enable_no_resched_notrace(); - else - preempt_enable_notrace(); -} - #ifdef CONFIG_BRANCH_TRACER extern int enable_branch_tracing(struct trace_array *tr); extern void disable_branch_tracing(void); diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 9d589d8..52fda6c 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -32,16 +32,15 @@ u64 notrace trace_clock_local(void) { u64 clock; - int resched; /* * sched_clock() is an architecture implemented, fast, scalable, * lockless clock. It is not guaranteed to be coherent across * CPUs, nor across CPU idle events. */ - resched = ftrace_preempt_disable(); + preempt_disable_notrace(); clock = sched_clock(); - ftrace_preempt_enable(resched); + preempt_enable_notrace(); return clock; } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 53cffc0..a594f9a 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1524,12 +1524,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) struct ftrace_entry *entry; unsigned long flags; long disabled; - int resched; int cpu; int pc; pc = preempt_count(); - resched = ftrace_preempt_disable(); + preempt_disable_notrace(); cpu = raw_smp_processor_id(); disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); @@ -1551,7 +1550,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) out: atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); - ftrace_preempt_enable(resched); + preempt_enable_notrace(); } static struct ftrace_ops trace_ops __initdata = diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index b3f3776..16aee4d 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -54,14 +54,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) struct trace_array_cpu *data; unsigned long flags; long disabled; - int cpu, resched; + int cpu; int pc; if (unlikely(!ftrace_function_enabled)) return; pc = preempt_count(); - resched = ftrace_preempt_disable(); + preempt_disable_notrace(); local_save_flags(flags); cpu = raw_smp_processor_id(); data = tr->data[cpu]; @@ -71,7 +71,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) trace_function(tr, ip, parent_ip, flags, pc); atomic_dec(&data->disabled); - ftrace_preempt_enable(resched); + preempt_enable_notrace(); } static void diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 0e73bc2..c9fd5bd 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -46,7 +46,6 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) struct trace_array_cpu *data; unsigned long flags; long disabled; - int resched; int cpu; int pc; @@ -54,7 +53,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) return; pc = preempt_count(); - resched = ftrace_preempt_disable(); + preempt_disable_notrace(); cpu = raw_smp_processor_id(); if (cpu != wakeup_current_cpu) @@ -74,7 +73,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) out: atomic_dec(&data->disabled); out_enable: - ftrace_preempt_enable(resched); + preempt_enable_notrace(); } static struct ftrace_ops trace_ops __read_mostly = diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index f4bc9b2..056468e 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -110,12 +110,12 
@@ static inline void check_stack(void) static void stack_trace_call(unsigned long ip, unsigned long parent_ip) { - int cpu, resched; + int cpu; if (unlikely(!ftrace_enabled || stack_trace_disabled)) return; - resched = ftrace_preempt_disable(); + preempt_disable_notrace(); cpu = raw_smp_processor_id(); /* no atomic needed, we only modify this variable by this cpu */ @@ -127,7 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) out: per_cpu(trace_active, cpu)--; /* prevent recursion in schedule */ - ftrace_preempt_enable(resched); + preempt_enable_notrace(); } static struct ftrace_ops trace_ops __read_mostly = -- cgit v1.1 From 50a323b73069b169385a8ac65633dee837a7d13f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 8 Jun 2010 21:40:36 +0200 Subject: sched: define and use CPU_PRI_* enums for cpu notifier priorities Instead of hardcoding priority 10 and 20 in sched and perf, collect them into CPU_PRI_* enums. Signed-off-by: Tejun Heo Acked-by: Peter Zijlstra Cc: Rusty Russell Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo --- kernel/sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index f8b8996..552faf8 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -5801,7 +5801,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) */ static struct notifier_block __cpuinitdata migration_notifier = { .notifier_call = migration_call, - .priority = 10 + .priority = CPU_PRI_MIGRATION, }; static int __init migration_init(void) -- cgit v1.1 From 3a101d0548e925ab16ca6aaa8cf4f767d322ddb0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 8 Jun 2010 21:40:36 +0200 Subject: sched: adjust when cpu_active and cpuset configurations are updated during cpu on/offlining Currently, when a cpu goes down, cpu_active is cleared before CPU_DOWN_PREPARE starts and cpuset configuration is updated from a default priority cpu notifier. When a cpu is coming up, it's set before CPU_ONLINE but cpuset configuration again is updated from the same cpu notifier. For cpu notifiers, this presents an inconsistent state. Threads which a CPU_DOWN_PREPARE notifier expects to be bound to the CPU can be migrated to other cpus because the cpu is no more inactive. Fix it by updating cpu_active in the highest priority cpu notifier and cpuset configuration in the second highest when a cpu is coming up. Down path is updated similarly. This guarantees that all other cpu notifiers see consistent cpu_active and cpuset configuration. cpuset_track_online_cpus() notifier is converted to cpuset_update_active_cpus() which just updates the configuration and now called from cpuset_cpu_[in]active() notifiers registered from sched_init_smp(). If cpuset is disabled, cpuset_update_active_cpus() degenerates into partition_sched_domains() making separate notifier for !CONFIG_CPUSETS unnecessary. This problem is triggered by cmwq. During CPU_DOWN_PREPARE, hotplug callback creates a kthread and kthread_bind()s it to the target cpu, and the thread is expected to run on that cpu. * Ingo's test discovered __cpuinit/exit markups were incorrect. Fixed. 
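A small user-space model of the ordering guarantee; the callback names mirror the ones added below, but the chain, the priority numbers and everything else are invented for illustration:

  #include <stdio.h>

  struct fake_notifier {
          const char *name;
          int priority;           /* higher value runs earlier */
          void (*call)(void);
  };

  static void cb_set_active(void)    { puts("1) set_cpu_active(cpu, true)"); }
  static void cb_update_cpuset(void) { puts("2) cpuset_update_active_cpus()"); }
  static void cb_driver(void)        { puts("3) ordinary CPU_ONLINE callback"); }

  int main(void)
  {
          /* Kept sorted by priority, as the kernel's notifier chain is; the
           * numbers here are illustrative, not the real CPU_PRI_* values. */
          struct fake_notifier chain[] = {
                  { "sched_cpu_active",  20, cb_set_active    },
                  { "cpuset_cpu_active", 10, cb_update_cpuset },
                  { "some driver",        0, cb_driver        },
          };
          unsigned int i;

          /* CPU_ONLINE walks the chain in this order, so every later callback
           * already sees cpu_active set and the cpuset configuration updated. */
          for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
                  chain[i].call();
          return 0;
  }

The same ordering applies in reverse spirit on the down path: the highest-priority callback clears cpu_active before any ordinary CPU_DOWN_PREPARE callback runs.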
Signed-off-by: Tejun Heo Acked-by: Peter Zijlstra Cc: Rusty Russell Cc: Ingo Molnar Cc: Paul Menage --- kernel/cpu.c | 6 ------ kernel/cpuset.c | 21 ++---------------- kernel/sched.c | 67 ++++++++++++++++++++++++++++++++++++++++++--------------- 3 files changed, 52 insertions(+), 42 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 97d1b42..f6e726f 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -235,11 +235,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) return -EINVAL; cpu_hotplug_begin(); - set_cpu_active(cpu, false); err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err) { - set_cpu_active(cpu, true); - nr_calls--; __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); printk("%s: attempt to take down CPU %u failed\n", @@ -249,7 +246,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); if (err) { - set_cpu_active(cpu, true); /* CPU didn't die: tell everyone. Can't complain. */ cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu); @@ -321,8 +317,6 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) goto out_notify; BUG_ON(!cpu_online(cpu)); - set_cpu_active(cpu, true); - /* Now call notifier in preparation. */ cpu_notify(CPU_ONLINE | mod, hcpu); diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 02b9611..05727dc 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -2113,31 +2113,17 @@ static void scan_for_empty_cpusets(struct cpuset *root) * but making no active use of cpusets. * * This routine ensures that top_cpuset.cpus_allowed tracks - * cpu_online_map on each CPU hotplug (cpuhp) event. + * cpu_active_mask on each CPU hotplug (cpuhp) event. * * Called within get_online_cpus(). Needs to call cgroup_lock() * before calling generate_sched_domains(). 
*/ -static int cpuset_track_online_cpus(struct notifier_block *unused_nb, - unsigned long phase, void *unused_cpu) +void __cpuexit cpuset_update_active_cpus(void) { struct sched_domain_attr *attr; cpumask_var_t *doms; int ndoms; - switch (phase) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - break; - - default: - return NOTIFY_DONE; - } - cgroup_lock(); mutex_lock(&callback_mutex); cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); @@ -2148,8 +2134,6 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb, /* Have scheduler rebuild the domains */ partition_sched_domains(ndoms, doms, attr); - - return NOTIFY_OK; } #ifdef CONFIG_MEMORY_HOTPLUG @@ -2203,7 +2187,6 @@ void __init cpuset_init_smp(void) cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; - hotcpu_notifier(cpuset_track_online_cpus, 0); hotplug_memory_notifier(cpuset_track_online_nodes, 10); cpuset_wq = create_singlethread_workqueue("cpuset"); diff --git a/kernel/sched.c b/kernel/sched.c index 552faf8..2b942e4 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -5804,17 +5804,46 @@ static struct notifier_block __cpuinitdata migration_notifier = { .priority = CPU_PRI_MIGRATION, }; +static int __cpuinit sched_cpu_active(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + case CPU_DOWN_FAILED: + set_cpu_active((long)hcpu, true); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_PREPARE: + set_cpu_active((long)hcpu, false); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + static int __init migration_init(void) { void *cpu = (void *)(long)smp_processor_id(); int err; - /* Start one for the boot CPU: */ + /* Initialize migration for the boot CPU */ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); BUG_ON(err == NOTIFY_BAD); migration_call(&migration_notifier, CPU_ONLINE, cpu); register_cpu_notifier(&migration_notifier); + /* Register cpu active notifiers */ + cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); + cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); + return 0; } early_initcall(migration_init); @@ -7273,29 +7302,35 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) } #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ -#ifndef CONFIG_CPUSETS /* - * Add online and remove offline CPUs from the scheduler domains. - * When cpusets are enabled they take over this function. + * Update cpusets according to cpu_active mask. If cpusets are + * disabled, cpuset_update_active_cpus() becomes a simple wrapper + * around partition_sched_domains(). 
*/ -static int update_sched_domains(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int __cpuexit cpuset_cpu_active(struct notifier_block *nfb, + unsigned long action, void *hcpu) { - switch (action) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - partition_sched_domains(1, NULL, NULL); + cpuset_update_active_cpus(); return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} +static int __cpuexit cpuset_cpu_inactive(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_PREPARE: + cpuset_update_active_cpus(); + return NOTIFY_OK; default: return NOTIFY_DONE; } } -#endif static int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu) @@ -7341,10 +7376,8 @@ void __init sched_init_smp(void) mutex_unlock(&sched_domains_mutex); put_online_cpus(); -#ifndef CONFIG_CPUSETS - /* XXX: Theoretical race here - CPU may be hotplugged now */ - hotcpu_notifier(update_sched_domains, 0); -#endif + hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); + hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); /* RT runtime code needs to handle some hotplug events */ hotcpu_notifier(update_runtime, 0); -- cgit v1.1 From 9ed3811a6c0d6b66e6cd47a5d7b9136386dce743 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 3 Dec 2009 15:08:03 +0900 Subject: sched: refactor try_to_wake_up() Factor ttwu_activate() and ttwu_woken_up() out of try_to_wake_up(). The factoring out doesn't affect try_to_wake_up() much code-generation-wise. Depending on configuration options, it ends up generating the same object code as before or slightly different one due to different register assignment. This is to help future implementation of try_to_wake_up_local(). Mike Galbraith suggested rename to ttwu_post_activation() from ttwu_woken_up() and comment update in try_to_wake_up(). 
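The shape of the refactoring, as a generic hedged sketch; the struct and helper names are invented, while the real helpers are ttwu_activate() and ttwu_post_activation() in the hunks below:

  #include <stdbool.h>
  #include <stdio.h>

  struct fake_task { bool on_rq; bool running; };

  /* Models ttwu_activate(): put the task on the runqueue. */
  static void activate(struct fake_task *p)
  {
          p->on_rq = true;
  }

  /* Models ttwu_post_activation(): the shared post-wakeup bookkeeping. */
  static void post_activation(struct fake_task *p, bool success)
  {
          if (success)
                  p->running = true;
  }

  /* Full wakeup path: cross-CPU handling omitted, then the shared helpers. */
  static bool wake_up_full(struct fake_task *p)
  {
          activate(p);
          post_activation(p, true);
          return true;
  }

  /* Local wakeup path: the second entry point the refactoring prepares for;
   * it reuses the same helpers under the caller's runqueue lock. */
  static bool wake_up_local(struct fake_task *p)
  {
          bool success = false;

          if (!p->on_rq) {
                  activate(p);
                  success = true;
          }
          post_activation(p, success);
          return success;
  }

  int main(void)
  {
          struct fake_task a = { 0 }, b = { 0 };

          printf("local wakeup: %d\n", wake_up_local(&a));
          printf("full wakeup:  %d\n", wake_up_full(&b));
          return 0;
  }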
Signed-off-by: Tejun Heo Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Ingo Molnar --- kernel/sched.c | 83 ++++++++++++++++++++++++++++++++++------------------------ 1 file changed, 49 insertions(+), 34 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 2b942e4..96eafd5 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2267,11 +2267,52 @@ static void update_avg(u64 *avg, u64 sample) } #endif -/*** +static inline void ttwu_activate(struct task_struct *p, struct rq *rq, + bool is_sync, bool is_migrate, bool is_local, + unsigned long en_flags) +{ + schedstat_inc(p, se.statistics.nr_wakeups); + if (is_sync) + schedstat_inc(p, se.statistics.nr_wakeups_sync); + if (is_migrate) + schedstat_inc(p, se.statistics.nr_wakeups_migrate); + if (is_local) + schedstat_inc(p, se.statistics.nr_wakeups_local); + else + schedstat_inc(p, se.statistics.nr_wakeups_remote); + + activate_task(rq, p, en_flags); +} + +static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, + int wake_flags, bool success) +{ + trace_sched_wakeup(p, success); + check_preempt_curr(rq, p, wake_flags); + + p->state = TASK_RUNNING; +#ifdef CONFIG_SMP + if (p->sched_class->task_woken) + p->sched_class->task_woken(rq, p); + + if (unlikely(rq->idle_stamp)) { + u64 delta = rq->clock - rq->idle_stamp; + u64 max = 2*sysctl_sched_migration_cost; + + if (delta > max) + rq->avg_idle = max; + else + update_avg(&rq->avg_idle, delta); + rq->idle_stamp = 0; + } +#endif +} + +/** * try_to_wake_up - wake up a thread - * @p: the to-be-woken-up thread + * @p: the thread to be awakened * @state: the mask of task states that can be woken - * @sync: do a synchronous wakeup? + * @wake_flags: wake modifier flags (WF_*) * * Put it on the run-queue if it's not already there. The "current" * thread is always on the run-queue (except when the actual @@ -2279,7 +2320,8 @@ static void update_avg(u64 *avg, u64 sample) * the simpler "current->state = TASK_RUNNING" to mark yourself * runnable without the overhead of this. * - * returns failure only if the task is already active. + * Returns %true if @p was woken up, %false if it was already running + * or @state didn't match @p's state. 
*/ static int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) @@ -2359,38 +2401,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, out_activate: #endif /* CONFIG_SMP */ - schedstat_inc(p, se.statistics.nr_wakeups); - if (wake_flags & WF_SYNC) - schedstat_inc(p, se.statistics.nr_wakeups_sync); - if (orig_cpu != cpu) - schedstat_inc(p, se.statistics.nr_wakeups_migrate); - if (cpu == this_cpu) - schedstat_inc(p, se.statistics.nr_wakeups_local); - else - schedstat_inc(p, se.statistics.nr_wakeups_remote); - activate_task(rq, p, en_flags); + ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu, + cpu == this_cpu, en_flags); success = 1; - out_running: - trace_sched_wakeup(p, success); - check_preempt_curr(rq, p, wake_flags); - - p->state = TASK_RUNNING; -#ifdef CONFIG_SMP - if (p->sched_class->task_woken) - p->sched_class->task_woken(rq, p); - - if (unlikely(rq->idle_stamp)) { - u64 delta = rq->clock - rq->idle_stamp; - u64 max = 2*sysctl_sched_migration_cost; - - if (delta > max) - rq->avg_idle = max; - else - update_avg(&rq->avg_idle, delta); - rq->idle_stamp = 0; - } -#endif + ttwu_post_activation(p, rq, wake_flags, success); out: task_rq_unlock(rq, &flags); put_cpu(); -- cgit v1.1 From 21aa9af03d06cb1d19a3738e5cf12acff984e69b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 8 Jun 2010 21:40:37 +0200 Subject: sched: add hooks for workqueue Concurrency managed workqueue needs to know when workers are going to sleep and waking up. Using these two hooks, cmwq keeps track of the current concurrency level and throttles execution of new works if it's too high and wakes up another worker from the sleep hook if it becomes too low. This patch introduces PF_WQ_WORKER to identify workqueue workers and adds the following two hooks. * wq_worker_waking_up(): called when a worker is woken up. * wq_worker_sleeping(): called when a worker is going to sleep and may return a pointer to a local task which should be woken up. The returned task is woken up using try_to_wake_up_local() which is simplified ttwu which is called under rq lock and can only wake up local tasks. Both hooks are currently defined as noop in kernel/workqueue_sched.h. Later cmwq implementation will replace them with proper implementation. These hooks are hard coded as they'll always be enabled. 
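A hedged model of where the hooks sit in the scheduler paths; everything below is invented for illustration and only mirrors the roles of PF_WQ_WORKER, wq_worker_waking_up() and wq_worker_sleeping():

  #include <stdio.h>

  #define MODEL_PF_WQ_WORKER 0x1  /* invented flag value for this model */

  struct fake_task { const char *comm; unsigned int flags; };

  /* Stand-in for wq_worker_waking_up(). */
  static void hook_waking_up(struct fake_task *p)
  {
          printf("workqueue: %s is waking up\n", p->comm);
  }

  /* Stand-in for wq_worker_sleeping(); may hand back a worker to wake. */
  static struct fake_task *hook_sleeping(struct fake_task *p)
  {
          printf("workqueue: %s is going to sleep\n", p->comm);
          return NULL;
  }

  /* The scheduler invokes the hooks only for tasks flagged as workers. */
  static void scheduler_sees_sleep(struct fake_task *p)
  {
          if (p->flags & MODEL_PF_WQ_WORKER) {
                  struct fake_task *to_wake = hook_sleeping(p);

                  if (to_wake)
                          hook_waking_up(to_wake);
          }
  }

  static void scheduler_sees_wakeup(struct fake_task *p)
  {
          if (p->flags & MODEL_PF_WQ_WORKER)
                  hook_waking_up(p);
  }

  int main(void)
  {
          struct fake_task worker = { "kworker", MODEL_PF_WQ_WORKER };
          struct fake_task shell  = { "bash", 0 };

          scheduler_sees_sleep(&worker);  /* hooks fire */
          scheduler_sees_sleep(&shell);   /* hooks skipped */
          scheduler_sees_wakeup(&worker);
          return 0;
  }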
Signed-off-by: Tejun Heo Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Ingo Molnar --- kernel/fork.c | 2 +- kernel/sched.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++-- kernel/workqueue_sched.h | 16 +++++++++++++++ 3 files changed, 68 insertions(+), 3 deletions(-) create mode 100644 kernel/workqueue_sched.h (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index b6cce14..a82a65c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -907,7 +907,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p) { unsigned long new_flags = p->flags; - new_flags &= ~PF_SUPERPRIV; + new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); new_flags |= PF_FORKNOEXEC; new_flags |= PF_STARTING; p->flags = new_flags; diff --git a/kernel/sched.c b/kernel/sched.c index 96eafd5..edd5a54 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -77,6 +77,7 @@ #include #include "sched_cpupri.h" +#include "workqueue_sched.h" #define CREATE_TRACE_POINTS #include @@ -2306,6 +2307,9 @@ static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, rq->idle_stamp = 0; } #endif + /* if a worker is waking up, notify workqueue */ + if ((p->flags & PF_WQ_WORKER) && success) + wq_worker_waking_up(p, cpu_of(rq)); } /** @@ -2414,6 +2418,37 @@ out: } /** + * try_to_wake_up_local - try to wake up a local task with rq lock held + * @p: the thread to be awakened + * + * Put @p on the run-queue if it's not alredy there. The caller must + * ensure that this_rq() is locked, @p is bound to this_rq() and not + * the current task. this_rq() stays locked over invocation. + */ +static void try_to_wake_up_local(struct task_struct *p) +{ + struct rq *rq = task_rq(p); + bool success = false; + + BUG_ON(rq != this_rq()); + BUG_ON(p == current); + lockdep_assert_held(&rq->lock); + + if (!(p->state & TASK_NORMAL)) + return; + + if (!p->se.on_rq) { + if (likely(!task_running(rq, p))) { + schedstat_inc(rq, ttwu_count); + schedstat_inc(rq, ttwu_local); + } + ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP); + success = true; + } + ttwu_post_activation(p, rq, 0, success); +} + +/** * wake_up_process - Wake up a specific process * @p: The process to be woken up. * @@ -3618,10 +3653,24 @@ need_resched_nonpreemptible: clear_tsk_need_resched(prev); if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { - if (unlikely(signal_pending_state(prev->state, prev))) + if (unlikely(signal_pending_state(prev->state, prev))) { prev->state = TASK_RUNNING; - else + } else { + /* + * If a worker is going to sleep, notify and + * ask workqueue whether it wants to wake up a + * task to maintain concurrency. If so, wake + * up the task. + */ + if (prev->flags & PF_WQ_WORKER) { + struct task_struct *to_wakeup; + + to_wakeup = wq_worker_sleeping(prev, cpu); + if (to_wakeup) + try_to_wake_up_local(to_wakeup); + } deactivate_task(rq, prev, DEQUEUE_SLEEP); + } switch_count = &prev->nvcsw; } diff --git a/kernel/workqueue_sched.h b/kernel/workqueue_sched.h new file mode 100644 index 0000000..af040ba --- /dev/null +++ b/kernel/workqueue_sched.h @@ -0,0 +1,16 @@ +/* + * kernel/workqueue_sched.h + * + * Scheduler hooks for concurrency managed workqueue. Only to be + * included from sched.c and workqueue.c. 
+ */ +static inline void wq_worker_waking_up(struct task_struct *task, + unsigned int cpu) +{ +} + +static inline struct task_struct *wq_worker_sleeping(struct task_struct *task, + unsigned int cpu) +{ + return NULL; +} -- cgit v1.1 From c9cf4dbb4d9ca715d8fedf13301a53296429abc6 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 19 May 2010 21:35:17 +0200 Subject: x86: Unify dumpstack.h and stacktrace.h arch/x86/include/asm/stacktrace.h and arch/x86/kernel/dumpstack.h declare headers of objects that deal with the same topic. Actually most of the files that include stacktrace.h also include dumpstack.h Although dumpstack.h seems more reserved for internals of stack traces, those are quite often needed to define specialized stack trace operations. And perf event arch headers are going to need access to such low level operations anyway. So don't continue to bother with dumpstack.h as it's not anymore about isolated deep internals. v2: fix struct stack_frame definition conflict in sysprof Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: H. Peter Anvin Cc: Thomas Gleixner Cc: Soeren Sandmann --- kernel/trace/trace_sysprof.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index a7974a5..c080956 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c @@ -33,12 +33,13 @@ static DEFINE_MUTEX(sample_timer_lock); */ static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer); -struct stack_frame { +struct stack_frame_user { const void __user *next_fp; unsigned long return_address; }; -static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) +static int +copy_stack_frame(const void __user *fp, struct stack_frame_user *frame) { int ret; @@ -125,7 +126,7 @@ trace_kernel(struct pt_regs *regs, struct trace_array *tr, static void timer_notify(struct pt_regs *regs, int cpu) { struct trace_array_cpu *data; - struct stack_frame frame; + struct stack_frame_user frame; struct trace_array *tr; const void __user *fp; int is_user; -- cgit v1.1 From b0f82b81fe6bbcf78d478071f33e44554726bc81 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 20 May 2010 07:47:21 +0200 Subject: perf: Drop the skip argument from perf_arch_fetch_regs_caller Drop this argument now that we always want to rewind only to the state of the first caller. It means frame pointers are not necessary anymore to reliably get the source of an event. But this also means we need this helper to be a macro now, as an inline function is not an option since we need to know when to provide a default implentation. Signed-off-by: Frederic Weisbecker Signed-off-by: Paul Mackerras Cc: David Miller Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo --- kernel/perf_event.c | 5 ----- kernel/trace/trace_event_perf.c | 2 -- 2 files changed, 7 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index e099650..9ae4dbc 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2851,11 +2851,6 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return NULL; } -__weak -void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) -{ -} - /* * We assume there is only KVM supporting the callbacks. 
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index cb6f365..21db1d3 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -9,8 +9,6 @@ #include #include "trace.h" -EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); - static char *perf_trace_buf[4]; /* -- cgit v1.1 From 30dbb20e68e6f7df974b77d2350ebad5eb6f6c9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Am=C3=A9rico=20Wang?= Date: Wed, 26 May 2010 18:57:53 +0800 Subject: tracing: Remove boot tracer The boot tracer is useless. It simply logs the initcalls but in fact these initcalls are also logged through printk while using the initcall_debug kernel parameter. Nobody seem to be using it so far. Then just remove it. Signed-off-by: WANG Cong Cc: Chase Douglas Cc: Steven Rostedt Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Li Zefan LKML-Reference: <20100526105753.GA5677@cr0.nay.redhat.com> [ remove the hooks in main.c, and the headers ] Signed-off-by: Frederic Weisbecker --- kernel/trace/Kconfig | 17 ---- kernel/trace/Makefile | 1 - kernel/trace/trace.c | 3 - kernel/trace/trace.h | 8 -- kernel/trace/trace_boot.c | 185 ------------------------------------------- kernel/trace/trace_entries.h | 27 ------- 6 files changed, 241 deletions(-) delete mode 100644 kernel/trace/trace_boot.c (limited to 'kernel') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 8b1797c..572992a 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -229,23 +229,6 @@ config FTRACE_SYSCALLS help Basic tracer to catch the syscall entry and exit events. -config BOOT_TRACER - bool "Trace boot initcalls" - select GENERIC_TRACER - select CONTEXT_SWITCH_TRACER - help - This tracer helps developers to optimize boot times: it records - the timings of the initcalls and traces key events and the identity - of tasks that can cause boot delays, such as context-switches. - - Its aim is to be parsed by the scripts/bootgraph.pl tool to - produce pretty graphics about boot inefficiencies, giving a visual - representation of the delays during initcalls - but the raw - /debug/tracing/trace text output is readable too. - - You must pass in initcall_debug and ftrace=initcall to the kernel - command line to enable this on bootup. 
- config TRACE_BRANCH_PROFILING bool select GENERIC_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index ffb1a5b..c3aaeba 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -38,7 +38,6 @@ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o obj-$(CONFIG_NOP_TRACER) += trace_nop.o obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o -obj-$(CONFIG_BOOT_TRACER) += trace_boot.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o obj-$(CONFIG_KMEMTRACE) += kmemtrace.o diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 55e4851..036fbc2 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4603,9 +4603,6 @@ __init static int tracer_alloc_buffers(void) register_tracer(&nop_trace); current_trace = &nop_trace; -#ifdef CONFIG_BOOT_TRACER - register_tracer(&boot_tracer); -#endif /* All seems OK, enable tracing */ tracing_disabled = 0; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 2cd9639..75a5e80 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -9,10 +9,8 @@ #include #include #include -#include #include #include - #include #include @@ -29,8 +27,6 @@ enum trace_type { TRACE_MMIO_RW, TRACE_MMIO_MAP, TRACE_BRANCH, - TRACE_BOOT_CALL, - TRACE_BOOT_RET, TRACE_GRAPH_RET, TRACE_GRAPH_ENT, TRACE_USER_STACK, @@ -48,8 +44,6 @@ enum kmemtrace_type_id { KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */ }; -extern struct tracer boot_tracer; - #undef __field #define __field(type, item) type item; @@ -209,8 +203,6 @@ extern void __ftrace_bad_type(void); TRACE_MMIO_RW); \ IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ TRACE_MMIO_MAP); \ - IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ - IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ TRACE_GRAPH_ENT); \ diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c deleted file mode 100644 index c21d5f3..0000000 --- a/kernel/trace/trace_boot.c +++ /dev/null @@ -1,185 +0,0 @@ -/* - * ring buffer based initcalls tracer - * - * Copyright (C) 2008 Frederic Weisbecker - * - */ - -#include -#include -#include -#include -#include - -#include "trace.h" -#include "trace_output.h" - -static struct trace_array *boot_trace; -static bool pre_initcalls_finished; - -/* Tells the boot tracer that the pre_smp_initcalls are finished. - * So we are ready . - * It doesn't enable sched events tracing however. - * You have to call enable_boot_trace to do so. 
- */ -void start_boot_trace(void) -{ - pre_initcalls_finished = true; -} - -void enable_boot_trace(void) -{ - if (boot_trace && pre_initcalls_finished) - tracing_start_sched_switch_record(); -} - -void disable_boot_trace(void) -{ - if (boot_trace && pre_initcalls_finished) - tracing_stop_sched_switch_record(); -} - -static int boot_trace_init(struct trace_array *tr) -{ - boot_trace = tr; - - if (!tr) - return 0; - - tracing_reset_online_cpus(tr); - - tracing_sched_switch_assign_trace(tr); - return 0; -} - -static enum print_line_t -initcall_call_print_line(struct trace_iterator *iter) -{ - struct trace_entry *entry = iter->ent; - struct trace_seq *s = &iter->seq; - struct trace_boot_call *field; - struct boot_trace_call *call; - u64 ts; - unsigned long nsec_rem; - int ret; - - trace_assign_type(field, entry); - call = &field->boot_call; - ts = iter->ts; - nsec_rem = do_div(ts, NSEC_PER_SEC); - - ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", - (unsigned long)ts, nsec_rem, call->func, call->caller); - - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - else - return TRACE_TYPE_HANDLED; -} - -static enum print_line_t -initcall_ret_print_line(struct trace_iterator *iter) -{ - struct trace_entry *entry = iter->ent; - struct trace_seq *s = &iter->seq; - struct trace_boot_ret *field; - struct boot_trace_ret *init_ret; - u64 ts; - unsigned long nsec_rem; - int ret; - - trace_assign_type(field, entry); - init_ret = &field->boot_ret; - ts = iter->ts; - nsec_rem = do_div(ts, NSEC_PER_SEC); - - ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " - "returned %d after %llu msecs\n", - (unsigned long) ts, - nsec_rem, - init_ret->func, init_ret->result, init_ret->duration); - - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - else - return TRACE_TYPE_HANDLED; -} - -static enum print_line_t initcall_print_line(struct trace_iterator *iter) -{ - struct trace_entry *entry = iter->ent; - - switch (entry->type) { - case TRACE_BOOT_CALL: - return initcall_call_print_line(iter); - case TRACE_BOOT_RET: - return initcall_ret_print_line(iter); - default: - return TRACE_TYPE_UNHANDLED; - } -} - -struct tracer boot_tracer __read_mostly = -{ - .name = "initcall", - .init = boot_trace_init, - .reset = tracing_reset_online_cpus, - .print_line = initcall_print_line, -}; - -void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) -{ - struct ftrace_event_call *call = &event_boot_call; - struct ring_buffer_event *event; - struct ring_buffer *buffer; - struct trace_boot_call *entry; - struct trace_array *tr = boot_trace; - - if (!tr || !pre_initcalls_finished) - return; - - /* Get its name now since this function could - * disappear because it is in the .init section. 
- */ - sprint_symbol(bt->func, (unsigned long)fn); - preempt_disable(); - - buffer = tr->buffer; - event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL, - sizeof(*entry), 0, 0); - if (!event) - goto out; - entry = ring_buffer_event_data(event); - entry->boot_call = *bt; - if (!filter_check_discard(call, entry, buffer, event)) - trace_buffer_unlock_commit(buffer, event, 0, 0); - out: - preempt_enable(); -} - -void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) -{ - struct ftrace_event_call *call = &event_boot_ret; - struct ring_buffer_event *event; - struct ring_buffer *buffer; - struct trace_boot_ret *entry; - struct trace_array *tr = boot_trace; - - if (!tr || !pre_initcalls_finished) - return; - - sprint_symbol(bt->func, (unsigned long)fn); - preempt_disable(); - - buffer = tr->buffer; - event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET, - sizeof(*entry), 0, 0); - if (!event) - goto out; - entry = ring_buffer_event_data(event); - entry->boot_ret = *bt; - if (!filter_check_discard(call, entry, buffer, event)) - trace_buffer_unlock_commit(buffer, event, 0, 0); - out: - preempt_enable(); -} diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index dc008c1..c293364 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -271,33 +271,6 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map, __entry->map_id, __entry->opcode) ); -FTRACE_ENTRY(boot_call, trace_boot_call, - - TRACE_BOOT_CALL, - - F_STRUCT( - __field_struct( struct boot_trace_call, boot_call ) - __field_desc( pid_t, boot_call, caller ) - __array_desc( char, boot_call, func, KSYM_SYMBOL_LEN) - ), - - F_printk("%d %s", __entry->caller, __entry->func) -); - -FTRACE_ENTRY(boot_ret, trace_boot_ret, - - TRACE_BOOT_RET, - - F_STRUCT( - __field_struct( struct boot_trace_ret, boot_ret ) - __array_desc( char, boot_ret, func, KSYM_SYMBOL_LEN) - __field_desc( int, boot_ret, result ) - __field_desc( unsigned long, boot_ret, duration ) - ), - - F_printk("%s %d %lx", - __entry->func, __entry->result, __entry->duration) -); #define TRACE_FUNC_SIZE 30 #define TRACE_FILE_SIZE 20 -- cgit v1.1 From c676329abb2b8359d9a5d734dec0c81779823fd6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 25 May 2010 10:48:51 +0200 Subject: sched_clock: Add local_clock() API and improve documentation For people who otherwise get to write: cpu_clock(smp_processor_id()), there is now: local_clock(). Also, as per suggestion from Andrew, provide some documentation on the various clock interfaces, and minimize the unsigned long long vs u64 mess. 
Signed-off-by: Peter Zijlstra Cc: Andrew Morton Cc: Linus Torvalds Cc: Jens Axboe LKML-Reference: <1275052414.1645.52.camel@laptop> Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 2 +- kernel/perf_event.c | 2 +- kernel/rcutorture.c | 3 +- kernel/sched.c | 2 +- kernel/sched_clock.c | 95 +++++++++++++++++++++++++++++++++++++++++----- kernel/trace/trace_clock.c | 2 +- 6 files changed, 90 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 5428679..f2852a5 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -146,7 +146,7 @@ static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], static inline u64 lockstat_clock(void) { - return cpu_clock(smp_processor_id()); + return local_clock(); } static int lock_point(unsigned long points[], unsigned long ip) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 31d6afe..109c5ec 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -214,7 +214,7 @@ static void perf_unpin_context(struct perf_event_context *ctx) static inline u64 perf_clock(void) { - return cpu_clock(raw_smp_processor_id()); + return local_clock(); } /* diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 6535ac8..2e2726d 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -239,8 +239,7 @@ static unsigned long rcu_random(struct rcu_random_state *rrsp) { if (--rrsp->rrs_count < 0) { - rrsp->rrs_state += - (unsigned long)cpu_clock(raw_smp_processor_id()); + rrsp->rrs_state += (unsigned long)local_clock(); rrsp->rrs_count = RCU_RANDOM_REFRESH; } rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD; diff --git a/kernel/sched.c b/kernel/sched.c index 8f351c5..3abd8f7 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1647,7 +1647,7 @@ static void update_shares(struct sched_domain *sd) if (root_task_group_empty()) return; - now = cpu_clock(raw_smp_processor_id()); + now = local_clock(); elapsed = now - sd->last_update; if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 906a0f7..52f1a14 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -10,19 +10,55 @@ * Ingo Molnar * Guillaume Chazarain * - * Create a semi stable clock from a mixture of other events, including: - * - gtod + * + * What: + * + * cpu_clock(i) provides a fast (execution time) high resolution + * clock with bounded drift between CPUs. The value of cpu_clock(i) + * is monotonic for constant i. The timestamp returned is in nanoseconds. + * + * ######################### BIG FAT WARNING ########################## + * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # + * # go backwards !! # + * #################################################################### + * + * There is no strict promise about the base, although it tends to start + * at 0 on boot (but people really shouldn't rely on that). + * + * cpu_clock(i) -- can be used from any context, including NMI. + * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI) + * local_clock() -- is cpu_clock() on the current cpu. + * + * How: + * + * The implementation either uses sched_clock() when + * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the + * sched_clock() is assumed to provide these properties (mostly it means + * the architecture provides a globally synchronized highres time source). 
+ * + * Otherwise it tries to create a semi stable clock from a mixture of other + * clocks, including: + * + * - GTOD (clock monotomic) * - sched_clock() * - explicit idle events * - * We use gtod as base and the unstable clock deltas. The deltas are filtered, - * making it monotonic and keeping it within an expected window. + * We use GTOD as base and use sched_clock() deltas to improve resolution. The + * deltas are filtered to provide monotonicity and keeping it within an + * expected window. * * Furthermore, explicit sleep and wakeup hooks allow us to account for time * that is otherwise invisible (TSC gets stopped). * - * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat - * consistent between cpus (never more than 2 jiffies difference). + * + * Notes: + * + * The !IRQ-safetly of sched_clock() and sched_clock_cpu() comes from things + * like cpufreq interrupts that can change the base clock (TSC) multiplier + * and cause funny jumps in time -- although the filtering provided by + * sched_clock_cpu() should mitigate serious artifacts we cannot rely on it + * in general since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on + * sched_clock(). */ #include #include @@ -170,6 +206,11 @@ again: return val; } +/* + * Similar to cpu_clock(), but requires local IRQs to be disabled. + * + * See cpu_clock(). + */ u64 sched_clock_cpu(int cpu) { struct sched_clock_data *scd; @@ -237,9 +278,19 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) } EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); -unsigned long long cpu_clock(int cpu) +/* + * As outlined at the top, provides a fast, high resolution, nanosecond + * time source that is monotonic per cpu argument and has bounded drift + * between cpus. + * + * ######################### BIG FAT WARNING ########################## + * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # + * # go backwards !! # + * #################################################################### + */ +u64 cpu_clock(int cpu) { - unsigned long long clock; + u64 clock; unsigned long flags; local_irq_save(flags); @@ -249,6 +300,25 @@ unsigned long long cpu_clock(int cpu) return clock; } +/* + * Similar to cpu_clock() for the current cpu. Time will only be observed + * to be monotonic if care is taken to only compare timestampt taken on the + * same CPU. + * + * See cpu_clock(). 
+ */ +u64 local_clock(void) +{ + u64 clock; + unsigned long flags; + + local_irq_save(flags); + clock = sched_clock_cpu(smp_processor_id()); + local_irq_restore(flags); + + return clock; +} + #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ void sched_clock_init(void) @@ -264,12 +334,17 @@ u64 sched_clock_cpu(int cpu) return sched_clock(); } - -unsigned long long cpu_clock(int cpu) +u64 cpu_clock(int cpu) { return sched_clock_cpu(cpu); } +u64 local_clock(void) +{ + return sched_clock_cpu(0); +} + #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ EXPORT_SYMBOL_GPL(cpu_clock); +EXPORT_SYMBOL_GPL(local_clock); diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 9d589d8..1723e2b 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -56,7 +56,7 @@ u64 notrace trace_clock_local(void) */ u64 notrace trace_clock(void) { - return cpu_clock(raw_smp_processor_id()); + return local_clock(); } -- cgit v1.1 From 246d86b51845063e4b06b27579990492dc5fa317 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 19 May 2010 14:57:11 +0200 Subject: sched: Simplify the reacquire_kernel_lock() logic - Contrary to what 6d558c3a says, there is no need to reload prev = rq->curr after the context switch. You always schedule back to where you came from, prev must be equal to current even if cpu/rq was changed. - This also means reacquire_kernel_lock() can use prev instead of current. - No need to reassign switch_count if reacquire_kernel_lock() reports need_resched(), we can just move the initial assignment down, under the "need_resched_nonpreemptible:" label. - Try to update the comment after context_switch(). Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra LKML-Reference: <20100519125711.GA30199@redhat.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 3abd8f7..f37a961 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3636,7 +3636,6 @@ need_resched: rq = cpu_rq(cpu); rcu_note_context_switch(cpu); prev = rq->curr; - switch_count = &prev->nivcsw; release_kernel_lock(prev); need_resched_nonpreemptible: @@ -3649,6 +3648,7 @@ need_resched_nonpreemptible: raw_spin_lock_irq(&rq->lock); clear_tsk_need_resched(prev); + switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely(signal_pending_state(prev->state, prev))) { prev->state = TASK_RUNNING; @@ -3689,8 +3689,10 @@ need_resched_nonpreemptible: context_switch(rq, prev, next); /* unlocks the rq */ /* - * the context switch might have flipped the stack from under - * us, hence refresh the local variables. + * The context switch have flipped the stack from under us + * and restored the local variables which were saved when + * this task called schedule() in the past. prev == current + * is still correct, but it can be moved to another cpu/rq. 
*/ cpu = smp_processor_id(); rq = cpu_rq(cpu); @@ -3699,11 +3701,8 @@ need_resched_nonpreemptible: post_schedule(rq); - if (unlikely(reacquire_kernel_lock(current) < 0)) { - prev = rq->curr; - switch_count = &prev->nivcsw; + if (unlikely(reacquire_kernel_lock(prev))) goto need_resched_nonpreemptible; - } preempt_enable_no_resched(); if (need_resched()) -- cgit v1.1 From fdf3e95d3916f18bf8703fb065499fdbc4dfe34c Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Mon, 17 May 2010 18:14:43 -0700 Subject: sched: Avoid side-effect of tickless idle on update_cpu_load tickless idle has a negative side effect on update_cpu_load(), which in turn can affect load balancing behavior. update_cpu_load() is supposed to be called every tick, to keep track of various load indicies. With tickless idle, there are no scheduler ticks called on the idle CPUs. Idle CPUs may still do load balancing (with idle_load_balance CPU) using the stale cpu_load. It will also cause problems when all CPUs go idle for a while and become active again. In this case loads would not degrade as expected. This is how rq->nr_load_updates change looks like under different conditions: All CPUS idle for 10 seconds (HZ=1000) 0 1621 10 496 11 139 12 875 13 1672 14 12 15 21 1 1472 2 2426 3 1161 4 2108 5 1525 6 701 7 249 8 766 9 1967 One CPU busy rest idle for 10 seconds 0 10003 10 601 11 95 12 966 13 1597 14 114 15 98 1 3457 2 93 3 6679 4 1425 5 1479 6 595 7 193 8 633 9 1687 All CPUs busy for 10 seconds 0 10026 10 10026 11 10026 12 10026 13 10025 14 10025 15 10025 1 10026 2 10026 3 10026 4 10026 5 10026 6 10026 7 10026 8 10026 9 10026 That is update_cpu_load works properly only when all CPUs are busy. If all are idle, all the CPUs get way lower updates. And when few CPUs are busy and rest are idle, only busy and ilb CPU does proper updates and rest of the idle CPUs will do lower updates. The patch keeps track of when a last update was done and fixes up the load avg based on current time. On one of my test system SPECjbb with warehouse 1..numcpus, patch improves throughput numbers by ~1% (average of 6 runs). On another test system (with different domain hierarchy) there is no noticable change in perf. 
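For illustration only, a naive (unoptimized) sketch of the fix-up the patch
applies: each missed tick decays the stale cpu_load[idx] by the usual per-tick
factor (2^idx - 1)/2^idx, with no new load added. The actual code in the patch
precomputes these factors on a 128 point scale and walks the set bits of the
miss count, so the cost is logarithmic in the number of missed ticks rather
than linear as in this sketch:

        static unsigned long decay_load_naive(unsigned long load,
                                              unsigned long missed, int idx)
        {
                /* load *= (2^idx - 1) / 2^idx, once per missed tick */
                while (missed--)
                        load -= load >> idx;
                return load;
        }
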
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Peter Zijlstra Cc: Thomas Gleixner LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/sched.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++--- kernel/sched_fair.c | 5 ++- 2 files changed, 99 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index f37a961..a757f6b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -457,6 +457,7 @@ struct rq { unsigned long nr_running; #define CPU_LOAD_IDX_MAX 5 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; + unsigned long last_load_update_tick; #ifdef CONFIG_NO_HZ u64 nohz_stamp; unsigned char in_nohz_recently; @@ -1803,6 +1804,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) static void calc_load_account_idle(struct rq *this_rq); static void update_sysctl(void); static int get_update_sysctl_factor(void); +static void update_cpu_load(struct rq *this_rq); static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) { @@ -3050,23 +3052,102 @@ static void calc_load_account_active(struct rq *this_rq) } /* + * The exact cpuload at various idx values, calculated at every tick would be + * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load + * + * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called + * on nth tick when cpu may be busy, then we have: + * load = ((2^idx - 1) / 2^idx)^(n-1) * load + * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load + * + * decay_load_missed() below does efficient calculation of + * load = ((2^idx - 1) / 2^idx)^(n-1) * load + * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load + * + * The calculation is approximated on a 128 point scale. + * degrade_zero_ticks is the number of ticks after which load at any + * particular idx is approximated to be zero. + * degrade_factor is a precomputed table, a row for each load idx. + * Each column corresponds to degradation factor for a power of two ticks, + * based on 128 point scale. + * Example: + * row 2, col 3 (=12) says that the degradation at load idx 2 after + * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8). + * + * With this power of 2 load factors, we can degrade the load n times + * by looking at 1 bits in n and doing as many mult/shift instead of + * n mult/shifts needed by the exact degradation. + */ +#define DEGRADE_SHIFT 7 +static const unsigned char + degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; +static const unsigned char + degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { + {0, 0, 0, 0, 0, 0, 0, 0}, + {64, 32, 8, 0, 0, 0, 0, 0}, + {96, 72, 40, 12, 1, 0, 0}, + {112, 98, 75, 43, 15, 1, 0}, + {120, 112, 98, 76, 45, 16, 2} }; + +/* + * Update cpu_load for any missed ticks, due to tickless idle. The backlog + * would be when CPU is idle and so we just decay the old load without + * adding any new load. + */ +static unsigned long +decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) +{ + int j = 0; + + if (!missed_updates) + return load; + + if (missed_updates >= degrade_zero_ticks[idx]) + return 0; + + if (idx == 1) + return load >> missed_updates; + + while (missed_updates) { + if (missed_updates % 2) + load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; + + missed_updates >>= 1; + j++; + } + return load; +} + +/* * Update rq->cpu_load[] statistics. This function is usually called every - * scheduler tick (TICK_NSEC). + * scheduler tick (TICK_NSEC). With tickless idle this will not be called + * every tick. 
We fix it up based on jiffies. */ static void update_cpu_load(struct rq *this_rq) { unsigned long this_load = this_rq->load.weight; + unsigned long curr_jiffies = jiffies; + unsigned long pending_updates; int i, scale; this_rq->nr_load_updates++; + /* Avoid repeated calls on same jiffy, when moving in and out of idle */ + if (curr_jiffies == this_rq->last_load_update_tick) + return; + + pending_updates = curr_jiffies - this_rq->last_load_update_tick; + this_rq->last_load_update_tick = curr_jiffies; + /* Update our load: */ - for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { + this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ + for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { unsigned long old_load, new_load; /* scale is effectively 1 << i now, and >> i divides by scale */ old_load = this_rq->cpu_load[i]; + old_load = decay_load_missed(old_load, pending_updates - 1, i); new_load = this_load; /* * Round up the averaging division if load is increasing. This @@ -3074,9 +3155,15 @@ static void update_cpu_load(struct rq *this_rq) * example. */ if (new_load > old_load) - new_load += scale-1; - this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i; + new_load += scale - 1; + + this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; } +} + +static void update_cpu_load_active(struct rq *this_rq) +{ + update_cpu_load(this_rq); calc_load_account_active(this_rq); } @@ -3464,7 +3551,7 @@ void scheduler_tick(void) raw_spin_lock(&rq->lock); update_rq_clock(rq); - update_cpu_load(rq); + update_cpu_load_active(rq); curr->sched_class->task_tick(rq, curr, 0); raw_spin_unlock(&rq->lock); @@ -7688,6 +7775,9 @@ void __init sched_init(void) for (j = 0; j < CPU_LOAD_IDX_MAX; j++) rq->cpu_load[j] = 0; + + rq->last_load_update_tick = jiffies; + #ifdef CONFIG_SMP rq->sd = NULL; rq->rd = NULL; diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index eed35ed..22b8b4f 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -3420,9 +3420,12 @@ static void run_rebalance_domains(struct softirq_action *h) if (need_resched()) break; + rq = cpu_rq(balance_cpu); + raw_spin_lock_irq(&rq->lock); + update_cpu_load(rq); + raw_spin_unlock_irq(&rq->lock); rebalance_domains(balance_cpu, CPU_IDLE); - rq = cpu_rq(balance_cpu); if (time_after(this_rq->next_balance, rq->next_balance)) this_rq->next_balance = rq->next_balance; } -- cgit v1.1 From 83cd4fe27ad8446619b2e030b171b858501de87d Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Fri, 21 May 2010 17:09:41 -0700 Subject: sched: Change nohz idle load balancing logic to push model In the new push model, all idle CPUs indeed go into nohz mode. There is still the concept of idle load balancer (performing the load balancing on behalf of all the idle cpu's in the system). Busy CPU kicks the nohz balancer when any of the nohz CPUs need idle load balancing. The kickee CPU does the idle load balancing on behalf of all idle CPUs instead of the normal idle balance. This addresses the below two problems with the current nohz ilb logic: * the idle load balancer continued to have periodic ticks during idle and wokeup frequently, even though it did not have any rebalancing to do on behalf of any of the idle CPUs. * On x86 and CPUs that have APIC timer stoppage on idle CPUs, this periodic wakeup can result in a periodic additional interrupt on a CPU doing the timer broadcast. 
Also currently we are migrating the unpinned timers from an idle to the cpu doing idle load balancing (when all the cpus in the system are idle, there is no idle load balancing cpu and timers get added to the same idle cpu where the request was made. So the existing optimization works only on semi idle system). And In semi idle system, we no longer have periodic ticks on the idle load balancer CPU. Using that cpu will add more delays to the timers than intended (as that cpu's timer base may not be uptodate wrt jiffies etc). This was causing mysterious slowdowns during boot etc. For now, in the semi idle case, use the nearest busy cpu for migrating timers from an idle cpu. This is good for power-savings anyway. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Peter Zijlstra Cc: Thomas Gleixner LKML-Reference: <1274486981.2840.46.camel@sbs-t61.sc.intel.com> Signed-off-by: Ingo Molnar --- kernel/hrtimer.c | 8 +- kernel/sched.c | 34 ++++- kernel/sched_fair.c | 329 ++++++++++++++++++++++++++++------------------- kernel/time/tick-sched.c | 8 +- kernel/timer.c | 8 +- 5 files changed, 234 insertions(+), 153 deletions(-) (limited to 'kernel') diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 5c69e99..e934339 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -144,12 +144,8 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, static int hrtimer_get_target(int this_cpu, int pinned) { #ifdef CONFIG_NO_HZ - if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) { - int preferred_cpu = get_nohz_load_balancer(); - - if (preferred_cpu >= 0) - return preferred_cpu; - } + if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) + return get_nohz_timer_target(); #endif return this_cpu; } diff --git a/kernel/sched.c b/kernel/sched.c index a757f6b..132950b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -460,7 +460,7 @@ struct rq { unsigned long last_load_update_tick; #ifdef CONFIG_NO_HZ u64 nohz_stamp; - unsigned char in_nohz_recently; + unsigned char nohz_balance_kick; #endif unsigned int skip_clock_update; @@ -1195,6 +1195,27 @@ static void resched_cpu(int cpu) #ifdef CONFIG_NO_HZ /* + * In the semi idle case, use the nearest busy cpu for migrating timers + * from an idle cpu. This is good for power-savings. + * + * We don't do similar optimization for completely idle system, as + * selecting an idle cpu will add more delays to the timers than intended + * (as that cpu's timer base may not be uptodate wrt jiffies etc). + */ +int get_nohz_timer_target(void) +{ + int cpu = smp_processor_id(); + int i; + struct sched_domain *sd; + + for_each_domain(cpu, sd) { + for_each_cpu(i, sched_domain_span(sd)) + if (!idle_cpu(i)) + return i; + } + return cpu; +} +/* * When add_timer_on() enqueues a timer into the timer wheel of an * idle CPU then this timer might expire before the next timer event * which is scheduled to wake up that CPU. 
In case of a completely @@ -7791,6 +7812,10 @@ void __init sched_init(void) rq->idle_stamp = 0; rq->avg_idle = 2*sysctl_sched_migration_cost; rq_attach_root(rq, &def_root_domain); +#ifdef CONFIG_NO_HZ + rq->nohz_balance_kick = 0; + init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i)); +#endif #endif init_rq_hrtick(rq); atomic_set(&rq->nr_iowait, 0); @@ -7835,8 +7860,11 @@ void __init sched_init(void) zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); #ifdef CONFIG_SMP #ifdef CONFIG_NO_HZ - zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); - alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); + zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); + alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); + atomic_set(&nohz.load_balancer, nr_cpu_ids); + atomic_set(&nohz.first_pick_cpu, nr_cpu_ids); + atomic_set(&nohz.second_pick_cpu, nr_cpu_ids); #endif /* May be allocated at isolcpus cmdline parse time */ if (cpu_isolated_map == NULL) diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 22b8b4f..6ee2e0a 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -3091,13 +3091,40 @@ out_unlock: } #ifdef CONFIG_NO_HZ + +static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb); + +static void trigger_sched_softirq(void *data) +{ + raise_softirq_irqoff(SCHED_SOFTIRQ); +} + +static inline void init_sched_softirq_csd(struct call_single_data *csd) +{ + csd->func = trigger_sched_softirq; + csd->info = NULL; + csd->flags = 0; + csd->priv = 0; +} + +/* + * idle load balancing details + * - One of the idle CPUs nominates itself as idle load_balancer, while + * entering idle. + * - This idle load balancer CPU will also go into tickless mode when + * it is idle, just like all other idle CPUs + * - When one of the busy CPUs notice that there may be an idle rebalancing + * needed, they will kick the idle load balancer, which then does idle + * load balancing for all the idle CPUs. + */ static struct { atomic_t load_balancer; - cpumask_var_t cpu_mask; - cpumask_var_t ilb_grp_nohz_mask; -} nohz ____cacheline_aligned = { - .load_balancer = ATOMIC_INIT(-1), -}; + atomic_t first_pick_cpu; + atomic_t second_pick_cpu; + cpumask_var_t idle_cpus_mask; + cpumask_var_t grp_idle_mask; + unsigned long next_balance; /* in jiffy units */ +} nohz ____cacheline_aligned; int get_nohz_load_balancer(void) { @@ -3151,17 +3178,17 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) */ static inline int is_semi_idle_group(struct sched_group *ilb_group) { - cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask, + cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask, sched_group_cpus(ilb_group)); /* * A sched_group is semi-idle when it has atleast one busy cpu * and atleast one idle cpu. */ - if (cpumask_empty(nohz.ilb_grp_nohz_mask)) + if (cpumask_empty(nohz.grp_idle_mask)) return 0; - if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group))) + if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group))) return 0; return 1; @@ -3194,7 +3221,7 @@ static int find_new_ilb(int cpu) * Optimize for the case when we have no idle CPUs or only one * idle CPU. 
Don't walk the sched_domain hierarchy in such cases */ - if (cpumask_weight(nohz.cpu_mask) < 2) + if (cpumask_weight(nohz.idle_cpus_mask) < 2) goto out_done; for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { @@ -3202,7 +3229,7 @@ static int find_new_ilb(int cpu) do { if (is_semi_idle_group(ilb_group)) - return cpumask_first(nohz.ilb_grp_nohz_mask); + return cpumask_first(nohz.grp_idle_mask); ilb_group = ilb_group->next; @@ -3210,98 +3237,116 @@ static int find_new_ilb(int cpu) } out_done: - return cpumask_first(nohz.cpu_mask); + return nr_cpu_ids; } #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ static inline int find_new_ilb(int call_cpu) { - return cpumask_first(nohz.cpu_mask); + return nr_cpu_ids; } #endif /* + * Kick a CPU to do the nohz balancing, if it is time for it. We pick the + * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle + * CPU (if there is one). + */ +static void nohz_balancer_kick(int cpu) +{ + int ilb_cpu; + + nohz.next_balance++; + + ilb_cpu = get_nohz_load_balancer(); + + if (ilb_cpu >= nr_cpu_ids) { + ilb_cpu = cpumask_first(nohz.idle_cpus_mask); + if (ilb_cpu >= nr_cpu_ids) + return; + } + + if (!cpu_rq(ilb_cpu)->nohz_balance_kick) { + struct call_single_data *cp; + + cpu_rq(ilb_cpu)->nohz_balance_kick = 1; + cp = &per_cpu(remote_sched_softirq_cb, cpu); + __smp_call_function_single(ilb_cpu, cp, 0); + } + return; +} + +/* * This routine will try to nominate the ilb (idle load balancing) * owner among the cpus whose ticks are stopped. ilb owner will do the idle - * load balancing on behalf of all those cpus. If all the cpus in the system - * go into this tickless mode, then there will be no ilb owner (as there is - * no need for one) and all the cpus will sleep till the next wakeup event - * arrives... - * - * For the ilb owner, tick is not stopped. And this tick will be used - * for idle load balancing. ilb owner will still be part of - * nohz.cpu_mask.. + * load balancing on behalf of all those cpus. * - * While stopping the tick, this cpu will become the ilb owner if there - * is no other owner. And will be the owner till that cpu becomes busy - * or if all cpus in the system stop their ticks at which point - * there is no need for ilb owner. + * When the ilb owner becomes busy, we will not have new ilb owner until some + * idle CPU wakes up and goes back to idle or some busy CPU tries to kick + * idle load balancing by kicking one of the idle CPUs. * - * When the ilb owner becomes busy, it nominates another owner, during the - * next busy scheduler_tick() + * Ticks are stopped for the ilb owner as well, with busy CPU kicking this + * ilb owner CPU in future (when there is a need for idle load balancing on + * behalf of all idle CPUs). */ -int select_nohz_load_balancer(int stop_tick) +void select_nohz_load_balancer(int stop_tick) { int cpu = smp_processor_id(); if (stop_tick) { - cpu_rq(cpu)->in_nohz_recently = 1; - if (!cpu_active(cpu)) { if (atomic_read(&nohz.load_balancer) != cpu) - return 0; + return; /* * If we are going offline and still the leader, * give up! 
*/ - if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) + if (atomic_cmpxchg(&nohz.load_balancer, cpu, + nr_cpu_ids) != cpu) BUG(); - return 0; + return; } - cpumask_set_cpu(cpu, nohz.cpu_mask); + cpumask_set_cpu(cpu, nohz.idle_cpus_mask); - /* time for ilb owner also to sleep */ - if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) { - if (atomic_read(&nohz.load_balancer) == cpu) - atomic_set(&nohz.load_balancer, -1); - return 0; - } + if (atomic_read(&nohz.first_pick_cpu) == cpu) + atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids); + if (atomic_read(&nohz.second_pick_cpu) == cpu) + atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids); - if (atomic_read(&nohz.load_balancer) == -1) { - /* make me the ilb owner */ - if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1) - return 1; - } else if (atomic_read(&nohz.load_balancer) == cpu) { + if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) { int new_ilb; - if (!(sched_smt_power_savings || - sched_mc_power_savings)) - return 1; + /* make me the ilb owner */ + if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids, + cpu) != nr_cpu_ids) + return; + /* * Check to see if there is a more power-efficient * ilb. */ new_ilb = find_new_ilb(cpu); if (new_ilb < nr_cpu_ids && new_ilb != cpu) { - atomic_set(&nohz.load_balancer, -1); + atomic_set(&nohz.load_balancer, nr_cpu_ids); resched_cpu(new_ilb); - return 0; + return; } - return 1; + return; } } else { - if (!cpumask_test_cpu(cpu, nohz.cpu_mask)) - return 0; + if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) + return; - cpumask_clear_cpu(cpu, nohz.cpu_mask); + cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); if (atomic_read(&nohz.load_balancer) == cpu) - if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) + if (atomic_cmpxchg(&nohz.load_balancer, cpu, + nr_cpu_ids) != cpu) BUG(); } - return 0; + return; } #endif @@ -3383,11 +3428,101 @@ out: rq->next_balance = next_balance; } +#ifdef CONFIG_NO_HZ /* - * run_rebalance_domains is triggered when needed from the scheduler tick. - * In CONFIG_NO_HZ case, the idle load balance owner will do the + * In CONFIG_NO_HZ case, the idle balance kickee will do the * rebalancing for all the cpus for whom scheduler ticks are stopped. */ +static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) +{ + struct rq *this_rq = cpu_rq(this_cpu); + struct rq *rq; + int balance_cpu; + + if (idle != CPU_IDLE || !this_rq->nohz_balance_kick) + return; + + for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { + if (balance_cpu == this_cpu) + continue; + + /* + * If this cpu gets work to do, stop the load balancing + * work being done for other cpus. Next load + * balancing owner will pick it up. + */ + if (need_resched()) { + this_rq->nohz_balance_kick = 0; + break; + } + + raw_spin_lock_irq(&this_rq->lock); + update_cpu_load(this_rq); + raw_spin_unlock_irq(&this_rq->lock); + + rebalance_domains(balance_cpu, CPU_IDLE); + + rq = cpu_rq(balance_cpu); + if (time_after(this_rq->next_balance, rq->next_balance)) + this_rq->next_balance = rq->next_balance; + } + nohz.next_balance = this_rq->next_balance; + this_rq->nohz_balance_kick = 0; +} + +/* + * Current heuristic for kicking the idle load balancer + * - first_pick_cpu is the one of the busy CPUs. It will kick + * idle load balancer when it has more than one process active. This + * eliminates the need for idle load balancing altogether when we have + * only one running process in the system (common case). 
+ * - If there are more than one busy CPU, idle load balancer may have + * to run for active_load_balance to happen (i.e., two busy CPUs are + * SMT or core siblings and can run better if they move to different + * physical CPUs). So, second_pick_cpu is the second of the busy CPUs + * which will kick idle load balancer as soon as it has any load. + */ +static inline int nohz_kick_needed(struct rq *rq, int cpu) +{ + unsigned long now = jiffies; + int ret; + int first_pick_cpu, second_pick_cpu; + + if (time_before(now, nohz.next_balance)) + return 0; + + if (!rq->nr_running) + return 0; + + first_pick_cpu = atomic_read(&nohz.first_pick_cpu); + second_pick_cpu = atomic_read(&nohz.second_pick_cpu); + + if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu && + second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu) + return 0; + + ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu); + if (ret == nr_cpu_ids || ret == cpu) { + atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids); + if (rq->nr_running > 1) + return 1; + } else { + ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu); + if (ret == nr_cpu_ids || ret == cpu) { + if (rq->nr_running) + return 1; + } + } + return 0; +} +#else +static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { } +#endif + +/* + * run_rebalance_domains is triggered when needed from the scheduler tick. + * Also triggered for nohz idle balancing (with nohz_balancing_kick set). + */ static void run_rebalance_domains(struct softirq_action *h) { int this_cpu = smp_processor_id(); @@ -3397,40 +3532,12 @@ static void run_rebalance_domains(struct softirq_action *h) rebalance_domains(this_cpu, idle); -#ifdef CONFIG_NO_HZ /* - * If this cpu is the owner for idle load balancing, then do the + * If this cpu has a pending nohz_balance_kick, then do the * balancing on behalf of the other idle cpus whose ticks are * stopped. */ - if (this_rq->idle_at_tick && - atomic_read(&nohz.load_balancer) == this_cpu) { - struct rq *rq; - int balance_cpu; - - for_each_cpu(balance_cpu, nohz.cpu_mask) { - if (balance_cpu == this_cpu) - continue; - - /* - * If this cpu gets work to do, stop the load balancing - * work being done for other cpus. Next load - * balancing owner will pick it up. - */ - if (need_resched()) - break; - - rq = cpu_rq(balance_cpu); - raw_spin_lock_irq(&rq->lock); - update_cpu_load(rq); - raw_spin_unlock_irq(&rq->lock); - rebalance_domains(balance_cpu, CPU_IDLE); - - if (time_after(this_rq->next_balance, rq->next_balance)) - this_rq->next_balance = rq->next_balance; - } - } -#endif + nohz_idle_balance(this_cpu, idle); } static inline int on_null_domain(int cpu) @@ -3440,57 +3547,17 @@ static inline int on_null_domain(int cpu) /* * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. - * - * In case of CONFIG_NO_HZ, this is the place where we nominate a new - * idle load balancing owner or decide to stop the periodic load balancing, - * if the whole system is idle. */ static inline void trigger_load_balance(struct rq *rq, int cpu) { -#ifdef CONFIG_NO_HZ - /* - * If we were in the nohz mode recently and busy at the current - * scheduler tick, then check if we need to nominate new idle - * load balancer. 
- */ - if (rq->in_nohz_recently && !rq->idle_at_tick) { - rq->in_nohz_recently = 0; - - if (atomic_read(&nohz.load_balancer) == cpu) { - cpumask_clear_cpu(cpu, nohz.cpu_mask); - atomic_set(&nohz.load_balancer, -1); - } - - if (atomic_read(&nohz.load_balancer) == -1) { - int ilb = find_new_ilb(cpu); - - if (ilb < nr_cpu_ids) - resched_cpu(ilb); - } - } - - /* - * If this cpu is idle and doing idle load balancing for all the - * cpus with ticks stopped, is it time for that to stop? - */ - if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && - cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { - resched_cpu(cpu); - return; - } - - /* - * If this cpu is idle and the idle load balancing is done by - * someone else, then no need raise the SCHED_SOFTIRQ - */ - if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && - cpumask_test_cpu(cpu, nohz.cpu_mask)) - return; -#endif /* Don't need to rebalance while attached to NULL domain */ if (time_after_eq(jiffies, rq->next_balance) && likely(!on_null_domain(cpu))) raise_softirq(SCHED_SOFTIRQ); +#ifdef CONFIG_NO_HZ + else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu))) + nohz_balancer_kick(cpu); +#endif } static void rq_online_fair(struct rq *rq) diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1d7b9bc..5f171f0 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -408,13 +408,7 @@ void tick_nohz_stop_sched_tick(int inidle) * the scheduler tick in nohz_restart_sched_tick. */ if (!ts->tick_stopped) { - if (select_nohz_load_balancer(1)) { - /* - * sched tick not stopped! - */ - cpumask_clear_cpu(cpu, nohz_cpu_mask); - goto out; - } + select_nohz_load_balancer(1); ts->idle_tick = hrtimer_get_expires(&ts->sched_timer); ts->tick_stopped = 1; diff --git a/kernel/timer.c b/kernel/timer.c index ee305c8..48d6aec 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -679,12 +679,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, cpu = smp_processor_id(); #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) - if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) { - int preferred_cpu = get_nohz_load_balancer(); - - if (preferred_cpu >= 0) - cpu = preferred_cpu; - } + if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) + cpu = get_nohz_timer_target(); #endif new_base = per_cpu(tvec_bases, cpu); -- cgit v1.1 From 9d5efe05eb0c904545a28b19c18b949f23334de0 Mon Sep 17 00:00:00 2001 From: Srivatsa Vaddagiri Date: Tue, 8 Jun 2010 14:57:02 +1000 Subject: sched: Fix capacity calculations for SMT4 Handle cpu capacity being reported as 0 on cores with more number of hardware threads. For example on a Power7 core with 4 hardware threads, core power is 1177 and thus power of each hardware thread is 1177/4 = 294. This low power can lead to capacity for each hardware thread being calculated as 0, which leads to tasks bouncing within the core madly! Fix this by reporting capacity for hardware threads as 1, provided their power is not scaled down significantly because of frequency scaling or real-time tasks usage of cpu. 
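A worked example of the zero-capacity case described above (editorial
illustration, assuming the usual SCHED_LOAD_SCALE of 1024):

        per-thread power = 1177 / 4                     = 294
        group capacity   = DIV_ROUND_CLOSEST(294, 1024) = 0

With the fix, such a sibling reports a capacity of 1 instead (as long as its
power has not been scaled down significantly), so the balancer stops bouncing
tasks between the threads of the core.
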
Signed-off-by: Srivatsa Vaddagiri Signed-off-by: Michael Neuling Signed-off-by: Peter Zijlstra Cc: Arjan van de Ven LKML-Reference: <20100608045702.21D03CC895@localhost.localdomain> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 53 +++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 6ee2e0a..b9b3462 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2285,13 +2285,6 @@ static void update_cpu_power(struct sched_domain *sd, int cpu) unsigned long power = SCHED_LOAD_SCALE; struct sched_group *sdg = sd->groups; - if (sched_feat(ARCH_POWER)) - power *= arch_scale_freq_power(sd, cpu); - else - power *= default_scale_freq_power(sd, cpu); - - power >>= SCHED_LOAD_SHIFT; - if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { if (sched_feat(ARCH_POWER)) power *= arch_scale_smt_power(sd, cpu); @@ -2301,6 +2294,15 @@ static void update_cpu_power(struct sched_domain *sd, int cpu) power >>= SCHED_LOAD_SHIFT; } + sdg->cpu_power_orig = power; + + if (sched_feat(ARCH_POWER)) + power *= arch_scale_freq_power(sd, cpu); + else + power *= default_scale_freq_power(sd, cpu); + + power >>= SCHED_LOAD_SHIFT; + power *= scale_rt_power(cpu); power >>= SCHED_LOAD_SHIFT; @@ -2333,6 +2335,31 @@ static void update_group_power(struct sched_domain *sd, int cpu) sdg->cpu_power = power; } +/* + * Try and fix up capacity for tiny siblings, this is needed when + * things like SD_ASYM_PACKING need f_b_g to select another sibling + * which on its own isn't powerful enough. + * + * See update_sd_pick_busiest() and check_asym_packing(). + */ +static inline int +fix_small_capacity(struct sched_domain *sd, struct sched_group *group) +{ + /* + * Only siblings can have significantly less than SCHED_LOAD_SCALE + */ + if (sd->level != SD_LV_SIBLING) + return 0; + + /* + * If ~90% of the cpu_power is still there, we're good. + */ + if (group->cpu_power * 32 < group->cpu_power_orig * 29) + return 1; + + return 0; +} + /** * update_sg_lb_stats - Update sched_group's statistics for load balancing. * @sd: The sched_domain whose statistics are to be updated. @@ -2426,6 +2453,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); + if (!sgs->group_capacity) + sgs->group_capacity = fix_small_capacity(sd, group); } /** @@ -2724,8 +2753,9 @@ ret: * find_busiest_queue - find the busiest runqueue among the cpus in group. 
*/ static struct rq * -find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, - unsigned long imbalance, const struct cpumask *cpus) +find_busiest_queue(struct sched_domain *sd, struct sched_group *group, + enum cpu_idle_type idle, unsigned long imbalance, + const struct cpumask *cpus) { struct rq *busiest = NULL, *rq; unsigned long max_load = 0; @@ -2736,6 +2766,9 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE); unsigned long wl; + if (!capacity) + capacity = fix_small_capacity(sd, group); + if (!cpumask_test_cpu(i, cpus)) continue; @@ -2852,7 +2885,7 @@ redo: goto out_balanced; } - busiest = find_busiest_queue(group, idle, imbalance, cpus); + busiest = find_busiest_queue(sd, group, idle, imbalance, cpus); if (!busiest) { schedstat_inc(sd, lb_nobusyq[idle]); goto out_balanced; -- cgit v1.1 From 532cb4c401e225b084c14d6bd6a2f8ee561de2f1 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 8 Jun 2010 14:57:02 +1000 Subject: sched: Add asymmetric group packing option for sibling domain Check to see if the group is packed in a sched doman. This is primarily intended to used at the sibling level. Some cores like POWER7 prefer to use lower numbered SMT threads. In the case of POWER7, it can move to lower SMT modes only when higher threads are idle. When in lower SMT modes, the threads will perform better since they share less core resources. Hence when we have idle threads, we want them to be the higher ones. This adds a hook into f_b_g() called check_asym_packing() to check the packing. This packing function is run on idle threads. It checks to see if the busiest CPU in this domain (core in the P7 case) has a higher CPU number than what where the packing function is being run on. If it is, calculate the imbalance and return the higher busier thread as the busiest group to f_b_g(). Here we are assuming a lower CPU number will be equivalent to a lower SMT thread number. It also creates a new SD_ASYM_PACKING flag to enable this feature at any scheduler domain level. It also creates an arch hook to enable this feature at the sibling level. The default function doesn't enable this feature. Based heavily on patch from Peter Zijlstra. Fixes from Srivatsa Vaddagiri. Signed-off-by: Michael Neuling Signed-off-by: Srivatsa Vaddagiri Signed-off-by: Peter Zijlstra Cc: Arjan van de Ven Cc: "H. Peter Anvin" Cc: Thomas Gleixner LKML-Reference: <20100608045702.2936CCC897@localhost.localdomain> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 139 +++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 122 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index b9b3462..593424f 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2458,11 +2458,53 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, } /** + * update_sd_pick_busiest - return 1 on busiest group + * @sd: sched_domain whose statistics are to be checked + * @sds: sched_domain statistics + * @sg: sched_group candidate to be checked for being the busiest + * @sds: sched_group statistics + * + * Determine if @sg is a busier group than the previously selected + * busiest group. 
+ */ +static bool update_sd_pick_busiest(struct sched_domain *sd, + struct sd_lb_stats *sds, + struct sched_group *sg, + struct sg_lb_stats *sgs, + int this_cpu) +{ + if (sgs->avg_load <= sds->max_load) + return false; + + if (sgs->sum_nr_running > sgs->group_capacity) + return true; + + if (sgs->group_imb) + return true; + + /* + * ASYM_PACKING needs to move all the work to the lowest + * numbered CPUs in the group, therefore mark all groups + * higher than ourself as busy. + */ + if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running && + this_cpu < group_first_cpu(sg)) { + if (!sds->busiest) + return true; + + if (group_first_cpu(sds->busiest) > group_first_cpu(sg)) + return true; + } + + return false; +} + +/** * update_sd_lb_stats - Update sched_group's statistics for load balancing. * @sd: sched_domain whose statistics are to be updated. * @this_cpu: Cpu for which load balance is currently performed. * @idle: Idle status of this_cpu - * @sd_idle: Idle status of the sched_domain containing group. + * @sd_idle: Idle status of the sched_domain containing sg. * @cpus: Set of cpus considered for load balancing. * @balance: Should we balance. * @sds: variable to hold the statistics for this sched_domain. @@ -2473,7 +2515,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, struct sd_lb_stats *sds) { struct sched_domain *child = sd->child; - struct sched_group *group = sd->groups; + struct sched_group *sg = sd->groups; struct sg_lb_stats sgs; int load_idx, prefer_sibling = 0; @@ -2486,21 +2528,20 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, do { int local_group; - local_group = cpumask_test_cpu(this_cpu, - sched_group_cpus(group)); + local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg)); memset(&sgs, 0, sizeof(sgs)); - update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle, + update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx, sd_idle, local_group, cpus, balance, &sgs); if (local_group && !(*balance)) return; sds->total_load += sgs.group_load; - sds->total_pwr += group->cpu_power; + sds->total_pwr += sg->cpu_power; /* * In case the child domain prefers tasks go to siblings - * first, lower the group capacity to one so that we'll try + * first, lower the sg capacity to one so that we'll try * and move all the excess tasks away. */ if (prefer_sibling) @@ -2508,23 +2549,72 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, if (local_group) { sds->this_load = sgs.avg_load; - sds->this = group; + sds->this = sg; sds->this_nr_running = sgs.sum_nr_running; sds->this_load_per_task = sgs.sum_weighted_load; - } else if (sgs.avg_load > sds->max_load && - (sgs.sum_nr_running > sgs.group_capacity || - sgs.group_imb)) { + } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) { sds->max_load = sgs.avg_load; - sds->busiest = group; + sds->busiest = sg; sds->busiest_nr_running = sgs.sum_nr_running; sds->busiest_group_capacity = sgs.group_capacity; sds->busiest_load_per_task = sgs.sum_weighted_load; sds->group_imb = sgs.group_imb; } - update_sd_power_savings_stats(group, sds, local_group, &sgs); - group = group->next; - } while (group != sd->groups); + update_sd_power_savings_stats(sg, sds, local_group, &sgs); + sg = sg->next; + } while (sg != sd->groups); +} + +int __weak arch_sd_sibiling_asym_packing(void) +{ + return 0*SD_ASYM_PACKING; +} + +/** + * check_asym_packing - Check to see if the group is packed into the + * sched doman. 
+ * + * This is primarily intended to used at the sibling level. Some + * cores like POWER7 prefer to use lower numbered SMT threads. In the + * case of POWER7, it can move to lower SMT modes only when higher + * threads are idle. When in lower SMT modes, the threads will + * perform better since they share less core resources. Hence when we + * have idle threads, we want them to be the higher ones. + * + * This packing function is run on idle threads. It checks to see if + * the busiest CPU in this domain (core in the P7 case) has a higher + * CPU number than the packing function is being run on. Here we are + * assuming lower CPU number will be equivalent to lower a SMT thread + * number. + * + * @sd: The sched_domain whose packing is to be checked. + * @sds: Statistics of the sched_domain which is to be packed + * @this_cpu: The cpu at whose sched_domain we're performing load-balance. + * @imbalance: returns amount of imbalanced due to packing. + * + * Returns 1 when packing is required and a task should be moved to + * this CPU. The amount of the imbalance is returned in *imbalance. + */ +static int check_asym_packing(struct sched_domain *sd, + struct sd_lb_stats *sds, + int this_cpu, unsigned long *imbalance) +{ + int busiest_cpu; + + if (!(sd->flags & SD_ASYM_PACKING)) + return 0; + + if (!sds->busiest) + return 0; + + busiest_cpu = group_first_cpu(sds->busiest); + if (this_cpu > busiest_cpu) + return 0; + + *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power, + SCHED_LOAD_SCALE); + return 1; } /** @@ -2719,6 +2809,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, if (!(*balance)) goto ret; + if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) && + check_asym_packing(sd, &sds, this_cpu, imbalance)) + return sds.busiest; + if (!sds.busiest || sds.busiest_nr_running == 0) goto out_balanced; @@ -2808,9 +2902,19 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group, /* Working cpumask for load_balance and load_balance_newidle. */ static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask); -static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle) +static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle, + int busiest_cpu, int this_cpu) { if (idle == CPU_NEWLY_IDLE) { + + /* + * ASYM_PACKING needs to force migrate tasks from busy but + * higher numbered CPUs in order to pack all tasks in the + * lowest numbered CPUs. + */ + if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu) + return 1; + /* * The only task running in a non-idle cpu can be moved to this * cpu in an attempt to completely freeup the other CPU @@ -2929,7 +3033,8 @@ redo: schedstat_inc(sd, lb_failed[idle]); sd->nr_balance_failed++; - if (need_active_balance(sd, sd_idle, idle)) { + if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest), + this_cpu)) { raw_spin_lock_irqsave(&busiest->lock, flags); /* don't kick the active_load_balance_cpu_stop, -- cgit v1.1 From ecc55f84b2e9741f29daa787ded93986df6cbe17 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 21 May 2010 15:11:34 +0200 Subject: perf, trace: Inline perf_swevent_put_recursion_context() Inline perf_swevent_put_recursion_context into perf_tp_event(), this shrinks the per trace template code footprint and saves a function call. 
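A simplified caller-side sketch (editorial illustration; the surrounding
template code is paraphrased): the tracepoint templates still acquire the
recursion context, but now hand rctx to perf_tp_event(), which drops it
internally instead of each template calling the put helper itself:

        int rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                return;

        /* ... fill in the raw trace record ... */

        /* perf_tp_event() now takes rctx and performs the matching
         * perf_swevent_put_recursion_context(rctx) before returning. */
        perf_tp_event(addr, count, record, entry_size, regs, head, rctx);
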
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index ff86c55..4bd3b59 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4213,14 +4213,12 @@ int perf_swevent_get_recursion_context(void) } EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); -void perf_swevent_put_recursion_context(int rctx) +void inline perf_swevent_put_recursion_context(int rctx) { struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); barrier(); cpuctx->recursion[rctx]--; } -EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); - void __perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) @@ -4601,7 +4599,7 @@ static int perf_tp_event_match(struct perf_event *event, } void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, - struct pt_regs *regs, struct hlist_head *head) + struct pt_regs *regs, struct hlist_head *head, int rctx) { struct perf_sample_data data; struct perf_event *event; @@ -4621,6 +4619,8 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, perf_swevent_add(event, count, 1, &data, regs); } rcu_read_unlock(); + + perf_swevent_put_recursion_context(rctx); } EXPORT_SYMBOL_GPL(perf_tp_event); -- cgit v1.1 From 8ed92280be013180e24c84456ab6babcb07037cc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 21 May 2010 15:13:59 +0200 Subject: perf, trace: Remove superfluous rcu_read_lock() __DO_TRACE() already calls the callbacks under rcu_read_lock_sched(), which is sufficient for our needs, avoid doing it again. Signed-off-by: Peter Zijlstra Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 4bd3b59..b39bec3 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4613,12 +4613,10 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, perf_sample_data_init(&data, addr); data.raw = &raw; - rcu_read_lock(); hlist_for_each_entry_rcu(event, node, head, hlist_entry) { if (perf_tp_event_match(event, &data, regs)) perf_swevent_add(event, count, 1, &data, regs); } - rcu_read_unlock(); perf_swevent_put_recursion_context(rctx); } -- cgit v1.1 From 3af9e859281bda7eb7c20b51879cf43aa788ac2e Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Tue, 18 May 2010 15:30:49 +0100 Subject: perf: Add non-exec mmap() tracking Add the capacility to track data mmap()s. This can be used together with PERF_SAMPLE_ADDR for data profiling. 
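A sketch of how user space could request the new tracking (assumes a kernel and headers that carry the attr.mmap_data bit added here, plus the usual raw perf_event_open() syscall; error handling and ring-buffer consumption are omitted):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;	/* record data addresses */
	attr.mmap = 1;			/* executable mappings, as before */
	attr.mmap_data = 1;		/* new: non-executable (data) mappings too */

	fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */,
		     -1 /* any cpu */, -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* PERF_RECORD_MMAP entries for [heap], [stack] and //anon mappings
	 * now appear in the ring buffer alongside the samples. */
	close(fd);
	return 0;
}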
Signed-off-by: Anton Blanchard [Updated code for stable perf ABI] Signed-off-by: Eric B Munson Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: <1274193049-25997-1-git-send-email-ebmunson@us.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index b39bec3..227ed9c 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1891,7 +1891,7 @@ static void free_event(struct perf_event *event) if (!event->parent) { atomic_dec(&nr_events); - if (event->attr.mmap) + if (event->attr.mmap || event->attr.mmap_data) atomic_dec(&nr_mmap_events); if (event->attr.comm) atomic_dec(&nr_comm_events); @@ -3491,7 +3491,7 @@ perf_event_read_event(struct perf_event *event, /* * task tracking -- fork/exit * - * enabled by: attr.comm | attr.mmap | attr.task + * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task */ struct perf_task_event { @@ -3541,7 +3541,8 @@ static int perf_event_task_match(struct perf_event *event) if (event->cpu != -1 && event->cpu != smp_processor_id()) return 0; - if (event->attr.comm || event->attr.mmap || event->attr.task) + if (event->attr.comm || event->attr.mmap || + event->attr.mmap_data || event->attr.task) return 1; return 0; @@ -3766,7 +3767,8 @@ static void perf_event_mmap_output(struct perf_event *event, } static int perf_event_mmap_match(struct perf_event *event, - struct perf_mmap_event *mmap_event) + struct perf_mmap_event *mmap_event, + int executable) { if (event->state < PERF_EVENT_STATE_INACTIVE) return 0; @@ -3774,19 +3776,21 @@ static int perf_event_mmap_match(struct perf_event *event, if (event->cpu != -1 && event->cpu != smp_processor_id()) return 0; - if (event->attr.mmap) + if ((!executable && event->attr.mmap_data) || + (executable && event->attr.mmap)) return 1; return 0; } static void perf_event_mmap_ctx(struct perf_event_context *ctx, - struct perf_mmap_event *mmap_event) + struct perf_mmap_event *mmap_event, + int executable) { struct perf_event *event; list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { - if (perf_event_mmap_match(event, mmap_event)) + if (perf_event_mmap_match(event, mmap_event, executable)) perf_event_mmap_output(event, mmap_event); } } @@ -3830,6 +3834,14 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) if (!vma->vm_mm) { name = strncpy(tmp, "[vdso]", sizeof(tmp)); goto got_name; + } else if (vma->vm_start <= vma->vm_mm->start_brk && + vma->vm_end >= vma->vm_mm->brk) { + name = strncpy(tmp, "[heap]", sizeof(tmp)); + goto got_name; + } else if (vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack) { + name = strncpy(tmp, "[stack]", sizeof(tmp)); + goto got_name; } name = strncpy(tmp, "//anon", sizeof(tmp)); @@ -3846,17 +3858,17 @@ got_name: rcu_read_lock(); cpuctx = &get_cpu_var(perf_cpu_context); - perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); + perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC); ctx = rcu_dereference(current->perf_event_ctxp); if (ctx) - perf_event_mmap_ctx(ctx, mmap_event); + perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC); put_cpu_var(perf_cpu_context); rcu_read_unlock(); kfree(buf); } -void __perf_event_mmap(struct vm_area_struct *vma) +void perf_event_mmap(struct vm_area_struct *vma) { struct perf_mmap_event mmap_event; @@ 
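A standalone model of the resulting contract (the structure and helper names are illustrative, not the kernel's): a ->commit_txn() that succeeds now also closes the transaction, so the caller only issues ->cancel_txn() on the failure paths.

#include <stdio.h>

struct pmu_model {
	int in_txn;
};

static void start_txn(struct pmu_model *p)  { p->in_txn = 1; }
static void cancel_txn(struct pmu_model *p) { p->in_txn = 0; }

/* Returns 0 on success; a successful commit closes the transaction itself. */
static int commit_txn(struct pmu_model *p, int pmu_accepts)
{
	if (pmu_accepts) {
		p->in_txn = 0;
		return 0;
	}
	return -1;			/* caller must cancel */
}

static int group_sched_in(struct pmu_model *p, int members_ok)
{
	start_txn(p);
	/* ... add each group member here; jump to 'fail' if one of them
	 * cannot be scheduled ... */
	if (!members_ok)
		goto fail;
	if (!commit_txn(p, 1))
		return 0;		/* committed and closed: nothing else to do */
fail:
	cancel_txn(p);			/* only the failure paths cancel */
	return -1;
}

int main(void)
{
	struct pmu_model p = { 0 };
	int ret;

	ret = group_sched_in(&p, 1);
	printf("success path: %d (txn open: %d)\n", ret, p.in_txn);
	ret = group_sched_in(&p, 0);
	printf("failure path: %d (txn open: %d)\n", ret, p.in_txn);
	return 0;
}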
-4911,7 +4923,7 @@ done: if (!event->parent) { atomic_inc(&nr_events); - if (event->attr.mmap) + if (event->attr.mmap || event->attr.mmap_data) atomic_inc(&nr_mmap_events); if (event->attr.comm) atomic_inc(&nr_comm_events); -- cgit v1.1 From 8d2cacbbb8deadfae78aa16e4e1ee619bdd7019e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 25 May 2010 17:49:05 +0200 Subject: perf: Cleanup {start,commit,cancel}_txn details Clarify some of the transactional group scheduling API details and change it so that a successfull ->commit_txn also closes the transaction. Signed-off-by: Peter Zijlstra Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: <1274803086.5882.1752.camel@twins> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 227ed9c..6f60920 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -675,7 +675,6 @@ group_sched_in(struct perf_event *group_event, struct perf_event *event, *partial_group = NULL; const struct pmu *pmu = group_event->pmu; bool txn = false; - int ret; if (group_event->state == PERF_EVENT_STATE_OFF) return 0; @@ -703,15 +702,9 @@ group_sched_in(struct perf_event *group_event, } } - if (!txn) + if (!txn || !pmu->commit_txn(pmu)) return 0; - ret = pmu->commit_txn(pmu); - if (!ret) { - pmu->cancel_txn(pmu); - return 0; - } - group_error: /* * Groups can be scheduled in as one unit only, so undo any -- cgit v1.1 From ca5135e6b4a3cbc7e187737520fbc4b508f6f7a2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 28 May 2010 19:33:23 +0200 Subject: perf: Rename perf_mmap_data to perf_buffer Rename to clarify code. 
s/perf_mmap_data/perf_buffer/g and selective s/data/buffer/g Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 308 ++++++++++++++++++++++++++-------------------------- 1 file changed, 154 insertions(+), 154 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 6f60920..93d5458 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1876,7 +1876,7 @@ static void free_event_rcu(struct rcu_head *head) } static void perf_pending_sync(struct perf_event *event); -static void perf_mmap_data_put(struct perf_mmap_data *data); +static void perf_buffer_put(struct perf_buffer *buffer); static void free_event(struct perf_event *event) { @@ -1892,9 +1892,9 @@ static void free_event(struct perf_event *event) atomic_dec(&nr_task_events); } - if (event->data) { - perf_mmap_data_put(event->data); - event->data = NULL; + if (event->buffer) { + perf_buffer_put(event->buffer); + event->buffer = NULL; } if (event->destroy) @@ -2119,13 +2119,13 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) static unsigned int perf_poll(struct file *file, poll_table *wait) { struct perf_event *event = file->private_data; - struct perf_mmap_data *data; + struct perf_buffer *buffer; unsigned int events = POLL_HUP; rcu_read_lock(); - data = rcu_dereference(event->data); - if (data) - events = atomic_xchg(&data->poll, 0); + buffer = rcu_dereference(event->buffer); + if (buffer) + events = atomic_xchg(&buffer->poll, 0); rcu_read_unlock(); poll_wait(file, &event->waitq, wait); @@ -2335,14 +2335,14 @@ static int perf_event_index(struct perf_event *event) void perf_event_update_userpage(struct perf_event *event) { struct perf_event_mmap_page *userpg; - struct perf_mmap_data *data; + struct perf_buffer *buffer; rcu_read_lock(); - data = rcu_dereference(event->data); - if (!data) + buffer = rcu_dereference(event->buffer); + if (!buffer) goto unlock; - userpg = data->user_page; + userpg = buffer->user_page; /* * Disable preemption so as to not let the corresponding user-space @@ -2376,15 +2376,15 @@ unlock: */ static struct page * -perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) +perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) { - if (pgoff > data->nr_pages) + if (pgoff > buffer->nr_pages) return NULL; if (pgoff == 0) - return virt_to_page(data->user_page); + return virt_to_page(buffer->user_page); - return virt_to_page(data->data_pages[pgoff - 1]); + return virt_to_page(buffer->data_pages[pgoff - 1]); } static void *perf_mmap_alloc_page(int cpu) @@ -2400,42 +2400,42 @@ static void *perf_mmap_alloc_page(int cpu) return page_address(page); } -static struct perf_mmap_data * -perf_mmap_data_alloc(struct perf_event *event, int nr_pages) +static struct perf_buffer * +perf_buffer_alloc(struct perf_event *event, int nr_pages) { - struct perf_mmap_data *data; + struct perf_buffer *buffer; unsigned long size; int i; - size = sizeof(struct perf_mmap_data); + size = sizeof(struct perf_buffer); size += nr_pages * sizeof(void *); - data = kzalloc(size, GFP_KERNEL); - if (!data) + buffer = kzalloc(size, GFP_KERNEL); + if (!buffer) goto fail; - data->user_page = perf_mmap_alloc_page(event->cpu); - if (!data->user_page) + buffer->user_page = perf_mmap_alloc_page(event->cpu); + if (!buffer->user_page) goto fail_user_page; for (i = 0; i < nr_pages; i++) { - data->data_pages[i] = 
perf_mmap_alloc_page(event->cpu); - if (!data->data_pages[i]) + buffer->data_pages[i] = perf_mmap_alloc_page(event->cpu); + if (!buffer->data_pages[i]) goto fail_data_pages; } - data->nr_pages = nr_pages; + buffer->nr_pages = nr_pages; - return data; + return buffer; fail_data_pages: for (i--; i >= 0; i--) - free_page((unsigned long)data->data_pages[i]); + free_page((unsigned long)buffer->data_pages[i]); - free_page((unsigned long)data->user_page); + free_page((unsigned long)buffer->user_page); fail_user_page: - kfree(data); + kfree(buffer); fail: return NULL; @@ -2449,17 +2449,17 @@ static void perf_mmap_free_page(unsigned long addr) __free_page(page); } -static void perf_mmap_data_free(struct perf_mmap_data *data) +static void perf_buffer_free(struct perf_buffer *buffer) { int i; - perf_mmap_free_page((unsigned long)data->user_page); - for (i = 0; i < data->nr_pages; i++) - perf_mmap_free_page((unsigned long)data->data_pages[i]); - kfree(data); + perf_mmap_free_page((unsigned long)buffer->user_page); + for (i = 0; i < buffer->nr_pages; i++) + perf_mmap_free_page((unsigned long)buffer->data_pages[i]); + kfree(buffer); } -static inline int page_order(struct perf_mmap_data *data) +static inline int page_order(struct perf_buffer *buffer) { return 0; } @@ -2472,18 +2472,18 @@ static inline int page_order(struct perf_mmap_data *data) * Required for architectures that have d-cache aliasing issues. */ -static inline int page_order(struct perf_mmap_data *data) +static inline int page_order(struct perf_buffer *buffer) { - return data->page_order; + return buffer->page_order; } static struct page * -perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) +perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) { - if (pgoff > (1UL << page_order(data))) + if (pgoff > (1UL << page_order(buffer))) return NULL; - return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE); + return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE); } static void perf_mmap_unmark_page(void *addr) @@ -2493,57 +2493,57 @@ static void perf_mmap_unmark_page(void *addr) page->mapping = NULL; } -static void perf_mmap_data_free_work(struct work_struct *work) +static void perf_buffer_free_work(struct work_struct *work) { - struct perf_mmap_data *data; + struct perf_buffer *buffer; void *base; int i, nr; - data = container_of(work, struct perf_mmap_data, work); - nr = 1 << page_order(data); + buffer = container_of(work, struct perf_buffer, work); + nr = 1 << page_order(buffer); - base = data->user_page; + base = buffer->user_page; for (i = 0; i < nr + 1; i++) perf_mmap_unmark_page(base + (i * PAGE_SIZE)); vfree(base); - kfree(data); + kfree(buffer); } -static void perf_mmap_data_free(struct perf_mmap_data *data) +static void perf_buffer_free(struct perf_buffer *buffer) { - schedule_work(&data->work); + schedule_work(&buffer->work); } -static struct perf_mmap_data * -perf_mmap_data_alloc(struct perf_event *event, int nr_pages) +static struct perf_buffer * +perf_buffer_alloc(struct perf_event *event, int nr_pages) { - struct perf_mmap_data *data; + struct perf_buffer *buffer; unsigned long size; void *all_buf; - size = sizeof(struct perf_mmap_data); + size = sizeof(struct perf_buffer); size += sizeof(void *); - data = kzalloc(size, GFP_KERNEL); - if (!data) + buffer = kzalloc(size, GFP_KERNEL); + if (!buffer) goto fail; - INIT_WORK(&data->work, perf_mmap_data_free_work); + INIT_WORK(&buffer->work, perf_buffer_free_work); all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); if (!all_buf) 
goto fail_all_buf; - data->user_page = all_buf; - data->data_pages[0] = all_buf + PAGE_SIZE; - data->page_order = ilog2(nr_pages); - data->nr_pages = 1; + buffer->user_page = all_buf; + buffer->data_pages[0] = all_buf + PAGE_SIZE; + buffer->page_order = ilog2(nr_pages); + buffer->nr_pages = 1; - return data; + return buffer; fail_all_buf: - kfree(data); + kfree(buffer); fail: return NULL; @@ -2551,15 +2551,15 @@ fail: #endif -static unsigned long perf_data_size(struct perf_mmap_data *data) +static unsigned long perf_data_size(struct perf_buffer *buffer) { - return data->nr_pages << (PAGE_SHIFT + page_order(data)); + return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer)); } static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct perf_event *event = vma->vm_file->private_data; - struct perf_mmap_data *data; + struct perf_buffer *buffer; int ret = VM_FAULT_SIGBUS; if (vmf->flags & FAULT_FLAG_MKWRITE) { @@ -2569,14 +2569,14 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } rcu_read_lock(); - data = rcu_dereference(event->data); - if (!data) + buffer = rcu_dereference(event->buffer); + if (!buffer) goto unlock; if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) goto unlock; - vmf->page = perf_mmap_to_page(data, vmf->pgoff); + vmf->page = perf_mmap_to_page(buffer, vmf->pgoff); if (!vmf->page) goto unlock; @@ -2592,51 +2592,51 @@ unlock: } static void -perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) +perf_buffer_init(struct perf_event *event, struct perf_buffer *buffer) { - long max_size = perf_data_size(data); + long max_size = perf_data_size(buffer); if (event->attr.watermark) { - data->watermark = min_t(long, max_size, + buffer->watermark = min_t(long, max_size, event->attr.wakeup_watermark); } - if (!data->watermark) - data->watermark = max_size / 2; + if (!buffer->watermark) + buffer->watermark = max_size / 2; - atomic_set(&data->refcount, 1); - rcu_assign_pointer(event->data, data); + atomic_set(&buffer->refcount, 1); + rcu_assign_pointer(event->buffer, buffer); } -static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) +static void perf_buffer_free_rcu(struct rcu_head *rcu_head) { - struct perf_mmap_data *data; + struct perf_buffer *buffer; - data = container_of(rcu_head, struct perf_mmap_data, rcu_head); - perf_mmap_data_free(data); + buffer = container_of(rcu_head, struct perf_buffer, rcu_head); + perf_buffer_free(buffer); } -static struct perf_mmap_data *perf_mmap_data_get(struct perf_event *event) +static struct perf_buffer *perf_buffer_get(struct perf_event *event) { - struct perf_mmap_data *data; + struct perf_buffer *buffer; rcu_read_lock(); - data = rcu_dereference(event->data); - if (data) { - if (!atomic_inc_not_zero(&data->refcount)) - data = NULL; + buffer = rcu_dereference(event->buffer); + if (buffer) { + if (!atomic_inc_not_zero(&buffer->refcount)) + buffer = NULL; } rcu_read_unlock(); - return data; + return buffer; } -static void perf_mmap_data_put(struct perf_mmap_data *data) +static void perf_buffer_put(struct perf_buffer *buffer) { - if (!atomic_dec_and_test(&data->refcount)) + if (!atomic_dec_and_test(&buffer->refcount)) return; - call_rcu(&data->rcu_head, perf_mmap_data_free_rcu); + call_rcu(&buffer->rcu_head, perf_buffer_free_rcu); } static void perf_mmap_open(struct vm_area_struct *vma) @@ -2651,16 +2651,16 @@ static void perf_mmap_close(struct vm_area_struct *vma) struct perf_event *event = vma->vm_file->private_data; if 
(atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { - unsigned long size = perf_data_size(event->data); + unsigned long size = perf_data_size(event->buffer); struct user_struct *user = event->mmap_user; - struct perf_mmap_data *data = event->data; + struct perf_buffer *buffer = event->buffer; atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); vma->vm_mm->locked_vm -= event->mmap_locked; - rcu_assign_pointer(event->data, NULL); + rcu_assign_pointer(event->buffer, NULL); mutex_unlock(&event->mmap_mutex); - perf_mmap_data_put(data); + perf_buffer_put(buffer); free_uid(user); } } @@ -2678,7 +2678,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) unsigned long user_locked, user_lock_limit; struct user_struct *user = current_user(); unsigned long locked, lock_limit; - struct perf_mmap_data *data; + struct perf_buffer *buffer; unsigned long vma_size; unsigned long nr_pages; long user_extra, extra; @@ -2699,7 +2699,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) nr_pages = (vma_size / PAGE_SIZE) - 1; /* - * If we have data pages ensure they're a power-of-two number, so we + * If we have buffer pages ensure they're a power-of-two number, so we * can do bitmasks instead of modulo. */ if (nr_pages != 0 && !is_power_of_2(nr_pages)) @@ -2713,9 +2713,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) WARN_ON_ONCE(event->ctx->parent_ctx); mutex_lock(&event->mmap_mutex); - if (event->data) { - if (event->data->nr_pages == nr_pages) - atomic_inc(&event->data->refcount); + if (event->buffer) { + if (event->buffer->nr_pages == nr_pages) + atomic_inc(&event->buffer->refcount); else ret = -EINVAL; goto unlock; @@ -2745,17 +2745,17 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) goto unlock; } - WARN_ON(event->data); + WARN_ON(event->buffer); - data = perf_mmap_data_alloc(event, nr_pages); - if (!data) { + buffer = perf_buffer_alloc(event, nr_pages); + if (!buffer) { ret = -ENOMEM; goto unlock; } - perf_mmap_data_init(event, data); + perf_buffer_init(event, buffer); if (vma->vm_flags & VM_WRITE) - event->data->writable = 1; + event->buffer->writable = 1; atomic_long_add(user_extra, &user->locked_vm); event->mmap_locked = extra; @@ -2964,15 +2964,15 @@ EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); /* * Output */ -static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, +static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail, unsigned long offset, unsigned long head) { unsigned long mask; - if (!data->writable) + if (!buffer->writable) return true; - mask = perf_data_size(data) - 1; + mask = perf_data_size(buffer) - 1; offset = (offset - tail) & mask; head = (head - tail) & mask; @@ -2985,7 +2985,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, static void perf_output_wakeup(struct perf_output_handle *handle) { - atomic_set(&handle->data->poll, POLL_IN); + atomic_set(&handle->buffer->poll, POLL_IN); if (handle->nmi) { handle->event->pending_wakeup = 1; @@ -3005,45 +3005,45 @@ static void perf_output_wakeup(struct perf_output_handle *handle) */ static void perf_output_get_handle(struct perf_output_handle *handle) { - struct perf_mmap_data *data = handle->data; + struct perf_buffer *buffer = handle->buffer; preempt_disable(); - local_inc(&data->nest); - handle->wakeup = local_read(&data->wakeup); + local_inc(&buffer->nest); + handle->wakeup = local_read(&buffer->wakeup); } static void 
perf_output_put_handle(struct perf_output_handle *handle) { - struct perf_mmap_data *data = handle->data; + struct perf_buffer *buffer = handle->buffer; unsigned long head; again: - head = local_read(&data->head); + head = local_read(&buffer->head); /* * IRQ/NMI can happen here, which means we can miss a head update. */ - if (!local_dec_and_test(&data->nest)) + if (!local_dec_and_test(&buffer->nest)) goto out; /* * Publish the known good head. Rely on the full barrier implied - * by atomic_dec_and_test() order the data->head read and this + * by atomic_dec_and_test() order the buffer->head read and this * write. */ - data->user_page->data_head = head; + buffer->user_page->data_head = head; /* * Now check if we missed an update, rely on the (compiler) - * barrier in atomic_dec_and_test() to re-read data->head. + * barrier in atomic_dec_and_test() to re-read buffer->head. */ - if (unlikely(head != local_read(&data->head))) { - local_inc(&data->nest); + if (unlikely(head != local_read(&buffer->head))) { + local_inc(&buffer->nest); goto again; } - if (handle->wakeup != local_read(&data->wakeup)) + if (handle->wakeup != local_read(&buffer->wakeup)) perf_output_wakeup(handle); out: @@ -3063,12 +3063,12 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle, buf += size; handle->size -= size; if (!handle->size) { - struct perf_mmap_data *data = handle->data; + struct perf_buffer *buffer = handle->buffer; handle->page++; - handle->page &= data->nr_pages - 1; - handle->addr = data->data_pages[handle->page]; - handle->size = PAGE_SIZE << page_order(data); + handle->page &= buffer->nr_pages - 1; + handle->addr = buffer->data_pages[handle->page]; + handle->size = PAGE_SIZE << page_order(buffer); } } while (len); } @@ -3077,7 +3077,7 @@ int perf_output_begin(struct perf_output_handle *handle, struct perf_event *event, unsigned int size, int nmi, int sample) { - struct perf_mmap_data *data; + struct perf_buffer *buffer; unsigned long tail, offset, head; int have_lost; struct { @@ -3093,19 +3093,19 @@ int perf_output_begin(struct perf_output_handle *handle, if (event->parent) event = event->parent; - data = rcu_dereference(event->data); - if (!data) + buffer = rcu_dereference(event->buffer); + if (!buffer) goto out; - handle->data = data; + handle->buffer = buffer; handle->event = event; handle->nmi = nmi; handle->sample = sample; - if (!data->nr_pages) + if (!buffer->nr_pages) goto out; - have_lost = local_read(&data->lost); + have_lost = local_read(&buffer->lost); if (have_lost) size += sizeof(lost_event); @@ -3117,30 +3117,30 @@ int perf_output_begin(struct perf_output_handle *handle, * tail pointer. So that all reads will be completed before the * write is issued. 
*/ - tail = ACCESS_ONCE(data->user_page->data_tail); + tail = ACCESS_ONCE(buffer->user_page->data_tail); smp_rmb(); - offset = head = local_read(&data->head); + offset = head = local_read(&buffer->head); head += size; - if (unlikely(!perf_output_space(data, tail, offset, head))) + if (unlikely(!perf_output_space(buffer, tail, offset, head))) goto fail; - } while (local_cmpxchg(&data->head, offset, head) != offset); + } while (local_cmpxchg(&buffer->head, offset, head) != offset); - if (head - local_read(&data->wakeup) > data->watermark) - local_add(data->watermark, &data->wakeup); + if (head - local_read(&buffer->wakeup) > buffer->watermark) + local_add(buffer->watermark, &buffer->wakeup); - handle->page = offset >> (PAGE_SHIFT + page_order(data)); - handle->page &= data->nr_pages - 1; - handle->size = offset & ((PAGE_SIZE << page_order(data)) - 1); - handle->addr = data->data_pages[handle->page]; + handle->page = offset >> (PAGE_SHIFT + page_order(buffer)); + handle->page &= buffer->nr_pages - 1; + handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1); + handle->addr = buffer->data_pages[handle->page]; handle->addr += handle->size; - handle->size = (PAGE_SIZE << page_order(data)) - handle->size; + handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size; if (have_lost) { lost_event.header.type = PERF_RECORD_LOST; lost_event.header.misc = 0; lost_event.header.size = sizeof(lost_event); lost_event.id = event->id; - lost_event.lost = local_xchg(&data->lost, 0); + lost_event.lost = local_xchg(&buffer->lost, 0); perf_output_put(handle, lost_event); } @@ -3148,7 +3148,7 @@ int perf_output_begin(struct perf_output_handle *handle, return 0; fail: - local_inc(&data->lost); + local_inc(&buffer->lost); perf_output_put_handle(handle); out: rcu_read_unlock(); @@ -3159,15 +3159,15 @@ out: void perf_output_end(struct perf_output_handle *handle) { struct perf_event *event = handle->event; - struct perf_mmap_data *data = handle->data; + struct perf_buffer *buffer = handle->buffer; int wakeup_events = event->attr.wakeup_events; if (handle->sample && wakeup_events) { - int events = local_inc_return(&data->events); + int events = local_inc_return(&buffer->events); if (events >= wakeup_events) { - local_sub(wakeup_events, &data->events); - local_inc(&data->wakeup); + local_sub(wakeup_events, &buffer->events); + local_inc(&buffer->wakeup); } } @@ -5010,7 +5010,7 @@ err_size: static int perf_event_set_output(struct perf_event *event, struct perf_event *output_event) { - struct perf_mmap_data *data = NULL, *old_data = NULL; + struct perf_buffer *buffer = NULL, *old_buffer = NULL; int ret = -EINVAL; if (!output_event) @@ -5040,19 +5040,19 @@ set: if (output_event) { /* get the buffer we want to redirect to */ - data = perf_mmap_data_get(output_event); - if (!data) + buffer = perf_buffer_get(output_event); + if (!buffer) goto unlock; } - old_data = event->data; - rcu_assign_pointer(event->data, data); + old_buffer = event->buffer; + rcu_assign_pointer(event->buffer, buffer); ret = 0; unlock: mutex_unlock(&event->mmap_mutex); - if (old_data) - perf_mmap_data_put(old_data); + if (old_buffer) + perf_buffer_put(old_buffer); out: return ret; } -- cgit v1.1 From d57e34fdd60be7ffd0b1d86bfa1a553df86b7172 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 28 May 2010 19:41:35 +0200 Subject: perf: Simplify the ring-buffer logic: make perf_buffer_alloc() do everything needed Currently there are perf_buffer_alloc() + perf_buffer_init() + some separate bits, fold it all into a single perf_buffer_alloc() 
and only leave the attachment to the event separate. Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 61 +++++++++++++++++++++++++++++------------------------ 1 file changed, 34 insertions(+), 27 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 93d5458..f75c9c9 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2369,6 +2369,25 @@ unlock: rcu_read_unlock(); } +static unsigned long perf_data_size(struct perf_buffer *buffer); + +static void +perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags) +{ + long max_size = perf_data_size(buffer); + + if (watermark) + buffer->watermark = min(max_size, watermark); + + if (!buffer->watermark) + buffer->watermark = max_size / 2; + + if (flags & PERF_BUFFER_WRITABLE) + buffer->writable = 1; + + atomic_set(&buffer->refcount, 1); +} + #ifndef CONFIG_PERF_USE_VMALLOC /* @@ -2401,7 +2420,7 @@ static void *perf_mmap_alloc_page(int cpu) } static struct perf_buffer * -perf_buffer_alloc(struct perf_event *event, int nr_pages) +perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) { struct perf_buffer *buffer; unsigned long size; @@ -2414,18 +2433,20 @@ perf_buffer_alloc(struct perf_event *event, int nr_pages) if (!buffer) goto fail; - buffer->user_page = perf_mmap_alloc_page(event->cpu); + buffer->user_page = perf_mmap_alloc_page(cpu); if (!buffer->user_page) goto fail_user_page; for (i = 0; i < nr_pages; i++) { - buffer->data_pages[i] = perf_mmap_alloc_page(event->cpu); + buffer->data_pages[i] = perf_mmap_alloc_page(cpu); if (!buffer->data_pages[i]) goto fail_data_pages; } buffer->nr_pages = nr_pages; + perf_buffer_init(buffer, watermark, flags); + return buffer; fail_data_pages: @@ -2516,7 +2537,7 @@ static void perf_buffer_free(struct perf_buffer *buffer) } static struct perf_buffer * -perf_buffer_alloc(struct perf_event *event, int nr_pages) +perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) { struct perf_buffer *buffer; unsigned long size; @@ -2540,6 +2561,8 @@ perf_buffer_alloc(struct perf_event *event, int nr_pages) buffer->page_order = ilog2(nr_pages); buffer->nr_pages = 1; + perf_buffer_init(buffer, watermark, flags); + return buffer; fail_all_buf: @@ -2591,23 +2614,6 @@ unlock: return ret; } -static void -perf_buffer_init(struct perf_event *event, struct perf_buffer *buffer) -{ - long max_size = perf_data_size(buffer); - - if (event->attr.watermark) { - buffer->watermark = min_t(long, max_size, - event->attr.wakeup_watermark); - } - - if (!buffer->watermark) - buffer->watermark = max_size / 2; - - atomic_set(&buffer->refcount, 1); - rcu_assign_pointer(event->buffer, buffer); -} - static void perf_buffer_free_rcu(struct rcu_head *rcu_head) { struct perf_buffer *buffer; @@ -2682,7 +2688,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) unsigned long vma_size; unsigned long nr_pages; long user_extra, extra; - int ret = 0; + int ret = 0, flags = 0; /* * Don't allow mmap() of inherited per-task counters. 
This would @@ -2747,15 +2753,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) WARN_ON(event->buffer); - buffer = perf_buffer_alloc(event, nr_pages); + if (vma->vm_flags & VM_WRITE) + flags |= PERF_BUFFER_WRITABLE; + + buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark, + event->cpu, flags); if (!buffer) { ret = -ENOMEM; goto unlock; } - - perf_buffer_init(event, buffer); - if (vma->vm_flags & VM_WRITE) - event->buffer->writable = 1; + rcu_assign_pointer(event->buffer, buffer); atomic_long_add(user_extra, &user->locked_vm); event->mmap_locked = extra; -- cgit v1.1 From b5e58793c7a8ec35e72ea6ec6c353499dd189809 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 21 May 2010 14:43:12 +0200 Subject: perf: Add perf_event_count() Create a helper function for those sites that want to read the event count. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index f75c9c9..ab4c0ff 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1736,6 +1736,11 @@ static void __perf_event_read(void *info) event->pmu->read(event); } +static inline u64 perf_event_count(struct perf_event *event) +{ + return atomic64_read(&event->count); +} + static u64 perf_event_read(struct perf_event *event) { /* @@ -1755,7 +1760,7 @@ static u64 perf_event_read(struct perf_event *event) raw_spin_unlock_irqrestore(&ctx->lock, flags); } - return atomic64_read(&event->count); + return perf_event_count(event); } /* @@ -2352,7 +2357,7 @@ void perf_event_update_userpage(struct perf_event *event) ++userpg->lock; barrier(); userpg->index = perf_event_index(event); - userpg->offset = atomic64_read(&event->count); + userpg->offset = perf_event_count(event); if (event->state == PERF_EVENT_STATE_ACTIVE) userpg->offset -= atomic64_read(&event->hw.prev_count); @@ -3211,7 +3216,7 @@ static void perf_output_read_one(struct perf_output_handle *handle, u64 values[4]; int n = 0; - values[n++] = atomic64_read(&event->count); + values[n++] = perf_event_count(event); if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { values[n++] = event->total_time_enabled + atomic64_read(&event->child_total_time_enabled); @@ -3248,7 +3253,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, if (leader != event) leader->pmu->read(leader); - values[n++] = atomic64_read(&leader->count); + values[n++] = perf_event_count(leader); if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(leader); @@ -3260,7 +3265,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, if (sub != event) sub->pmu->read(sub); - values[n++] = atomic64_read(&sub->count); + values[n++] = perf_event_count(sub); if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(sub); @@ -5369,7 +5374,7 @@ static void sync_child_event(struct perf_event *child_event, if (child_event->attr.inherit_stat) perf_event_read_event(child_event, child); - child_val = atomic64_read(&child_event->count); + child_val = perf_event_count(child_event); /* * Add back the child's count to the parent's count: -- cgit v1.1 From a6e6dea68c18f705957573ee5596097c7e82d0e5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 21 May 2010 14:27:58 +0200 Subject: perf: Add perf_event::child_count Only child counters adding 
back their values into the parent counter are responsible for cross-cpu updates to event->count. So if we pull that out into a new child_count variable, we get an event->count that is only modified locally. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index ab4c0ff..a395fda 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1738,7 +1738,7 @@ static void __perf_event_read(void *info) static inline u64 perf_event_count(struct perf_event *event) { - return atomic64_read(&event->count); + return atomic64_read(&event->count) + atomic64_read(&event->child_count); } static u64 perf_event_read(struct perf_event *event) @@ -5379,7 +5379,7 @@ static void sync_child_event(struct perf_event *child_event, /* * Add back the child's count to the parent's count: */ - atomic64_add(child_val, &parent_event->count); + atomic64_add(child_val, &parent_event->child_count); atomic64_add(child_event->total_time_enabled, &parent_event->child_total_time_enabled); atomic64_add(child_event->total_time_running, -- cgit v1.1 From e78505958cf123048fb48cb56b79cebb8edd15fb Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 21 May 2010 14:43:08 +0200 Subject: perf: Convert perf_event to local_t Since now all modification to event->count (and ->prev_count and ->period_left) are local to a cpu, change then to local64_t so we avoid the LOCK'ed ops. Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index a395fda..97c7301 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1148,9 +1148,9 @@ static void __perf_event_sync_stat(struct perf_event *event, * In order to keep per-task stats reliable we need to flip the event * values when we flip the contexts. 
*/ - value = atomic64_read(&next_event->count); - value = atomic64_xchg(&event->count, value); - atomic64_set(&next_event->count, value); + value = local64_read(&next_event->count); + value = local64_xchg(&event->count, value); + local64_set(&next_event->count, value); swap(event->total_time_enabled, next_event->total_time_enabled); swap(event->total_time_running, next_event->total_time_running); @@ -1540,10 +1540,10 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) hwc->sample_period = sample_period; - if (atomic64_read(&hwc->period_left) > 8*sample_period) { + if (local64_read(&hwc->period_left) > 8*sample_period) { perf_disable(); perf_event_stop(event); - atomic64_set(&hwc->period_left, 0); + local64_set(&hwc->period_left, 0); perf_event_start(event); perf_enable(); } @@ -1584,7 +1584,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) perf_disable(); event->pmu->read(event); - now = atomic64_read(&event->count); + now = local64_read(&event->count); delta = now - hwc->freq_count_stamp; hwc->freq_count_stamp = now; @@ -1738,7 +1738,7 @@ static void __perf_event_read(void *info) static inline u64 perf_event_count(struct perf_event *event) { - return atomic64_read(&event->count) + atomic64_read(&event->child_count); + return local64_read(&event->count) + atomic64_read(&event->child_count); } static u64 perf_event_read(struct perf_event *event) @@ -2141,7 +2141,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) static void perf_event_reset(struct perf_event *event) { (void)perf_event_read(event); - atomic64_set(&event->count, 0); + local64_set(&event->count, 0); perf_event_update_userpage(event); } @@ -2359,7 +2359,7 @@ void perf_event_update_userpage(struct perf_event *event) userpg->index = perf_event_index(event); userpg->offset = perf_event_count(event); if (event->state == PERF_EVENT_STATE_ACTIVE) - userpg->offset -= atomic64_read(&event->hw.prev_count); + userpg->offset -= local64_read(&event->hw.prev_count); userpg->time_enabled = event->total_time_enabled + atomic64_read(&event->child_total_time_enabled); @@ -4035,14 +4035,14 @@ static u64 perf_swevent_set_period(struct perf_event *event) hwc->last_period = hwc->sample_period; again: - old = val = atomic64_read(&hwc->period_left); + old = val = local64_read(&hwc->period_left); if (val < 0) return 0; nr = div64_u64(period + val, period); offset = nr * period; val -= offset; - if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) + if (local64_cmpxchg(&hwc->period_left, old, val) != old) goto again; return nr; @@ -4081,7 +4081,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, { struct hw_perf_event *hwc = &event->hw; - atomic64_add(nr, &event->count); + local64_add(nr, &event->count); if (!regs) return; @@ -4092,7 +4092,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) return perf_swevent_overflow(event, 1, nmi, data, regs); - if (atomic64_add_negative(nr, &hwc->period_left)) + if (local64_add_negative(nr, &hwc->period_left)) return; perf_swevent_overflow(event, 0, nmi, data, regs); @@ -4383,8 +4383,8 @@ static void cpu_clock_perf_event_update(struct perf_event *event) u64 now; now = cpu_clock(cpu); - prev = atomic64_xchg(&event->hw.prev_count, now); - atomic64_add(now - prev, &event->count); + prev = local64_xchg(&event->hw.prev_count, now); + local64_add(now - prev, &event->count); } static int cpu_clock_perf_event_enable(struct perf_event *event) @@ -4392,7 
+4392,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; int cpu = raw_smp_processor_id(); - atomic64_set(&hwc->prev_count, cpu_clock(cpu)); + local64_set(&hwc->prev_count, cpu_clock(cpu)); perf_swevent_start_hrtimer(event); return 0; @@ -4424,9 +4424,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now) u64 prev; s64 delta; - prev = atomic64_xchg(&event->hw.prev_count, now); + prev = local64_xchg(&event->hw.prev_count, now); delta = now - prev; - atomic64_add(delta, &event->count); + local64_add(delta, &event->count); } static int task_clock_perf_event_enable(struct perf_event *event) @@ -4436,7 +4436,7 @@ static int task_clock_perf_event_enable(struct perf_event *event) now = event->ctx->time; - atomic64_set(&hwc->prev_count, now); + local64_set(&hwc->prev_count, now); perf_swevent_start_hrtimer(event); @@ -4879,7 +4879,7 @@ perf_event_alloc(struct perf_event_attr *attr, hwc->sample_period = 1; hwc->last_period = hwc->sample_period; - atomic64_set(&hwc->period_left, hwc->sample_period); + local64_set(&hwc->period_left, hwc->sample_period); /* * we currently do not support PERF_FORMAT_GROUP on inherited events @@ -5313,7 +5313,7 @@ inherit_event(struct perf_event *parent_event, hwc->sample_period = sample_period; hwc->last_period = sample_period; - atomic64_set(&hwc->period_left, sample_period); + local64_set(&hwc->period_left, sample_period); } child_event->overflow_handler = parent_event->overflow_handler; -- cgit v1.1 From 039ca4e74a1cf60bd7487324a564ecf5c981f254 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 26 May 2010 17:22:17 +0800 Subject: tracing: Remove kmemtrace ftrace plugin We have been resisting new ftrace plugins and removing existing ones, and kmemtrace has been superseded by kmem trace events and perf-kmem, so we remove it. Signed-off-by: Li Zefan Acked-by: Pekka Enberg Acked-by: Eduard - Gabriel Munteanu Cc: Ingo Molnar Cc: Steven Rostedt [ remove kmemtrace from the makefile, handle slob too ] Signed-off-by: Frederic Weisbecker --- kernel/trace/Kconfig | 20 -- kernel/trace/Makefile | 1 - kernel/trace/kmemtrace.c | 529 ------------------------------------------- kernel/trace/trace.h | 12 - kernel/trace/trace_entries.h | 35 --- 5 files changed, 597 deletions(-) delete mode 100644 kernel/trace/kmemtrace.c (limited to 'kernel') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 572992a..f669092 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -354,26 +354,6 @@ config STACK_TRACER Say N if unsure. -config KMEMTRACE - bool "Trace SLAB allocations" - select GENERIC_TRACER - help - kmemtrace provides tracing for slab allocator functions, such as - kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected - data is then fed to the userspace application in order to analyse - allocation hotspots, internal fragmentation and so on, making it - possible to see how well an allocator performs, as well as debug - and profile kernel code. - - This requires an userspace application to use. See - Documentation/trace/kmemtrace.txt for more information. - - Saying Y will make the kernel somewhat larger and slower. However, - if you disable kmemtrace at run-time or boot-time, the performance - impact is minimal (depending on the arch the kernel is built for). - - If unsure, say N. 
- config WORKQUEUE_TRACER bool "Trace workqueues" select GENERIC_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index c3aaeba..469a1c7 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -40,7 +40,6 @@ obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o -obj-$(CONFIG_KMEMTRACE) += kmemtrace.o obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o ifeq ($(CONFIG_BLOCK),y) diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c deleted file mode 100644 index bbfc1bb..0000000 --- a/kernel/trace/kmemtrace.c +++ /dev/null @@ -1,529 +0,0 @@ -/* - * Memory allocator tracing - * - * Copyright (C) 2008 Eduard - Gabriel Munteanu - * Copyright (C) 2008 Pekka Enberg - * Copyright (C) 2008 Frederic Weisbecker - */ - -#include -#include -#include -#include -#include - -#include - -#include "trace_output.h" -#include "trace.h" - -/* Select an alternative, minimalistic output than the original one */ -#define TRACE_KMEM_OPT_MINIMAL 0x1 - -static struct tracer_opt kmem_opts[] = { - /* Default disable the minimalistic output */ - { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) }, - { } -}; - -static struct tracer_flags kmem_tracer_flags = { - .val = 0, - .opts = kmem_opts -}; - -static struct trace_array *kmemtrace_array; - -/* Trace allocations */ -static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id, - unsigned long call_site, - const void *ptr, - size_t bytes_req, - size_t bytes_alloc, - gfp_t gfp_flags, - int node) -{ - struct ftrace_event_call *call = &event_kmem_alloc; - struct trace_array *tr = kmemtrace_array; - struct kmemtrace_alloc_entry *entry; - struct ring_buffer_event *event; - - event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); - if (!event) - return; - - entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, 0, 0); - - entry->ent.type = TRACE_KMEM_ALLOC; - entry->type_id = type_id; - entry->call_site = call_site; - entry->ptr = ptr; - entry->bytes_req = bytes_req; - entry->bytes_alloc = bytes_alloc; - entry->gfp_flags = gfp_flags; - entry->node = node; - - if (!filter_check_discard(call, entry, tr->buffer, event)) - ring_buffer_unlock_commit(tr->buffer, event); - - trace_wake_up(); -} - -static inline void kmemtrace_free(enum kmemtrace_type_id type_id, - unsigned long call_site, - const void *ptr) -{ - struct ftrace_event_call *call = &event_kmem_free; - struct trace_array *tr = kmemtrace_array; - struct kmemtrace_free_entry *entry; - struct ring_buffer_event *event; - - event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); - if (!event) - return; - entry = ring_buffer_event_data(event); - tracing_generic_entry_update(&entry->ent, 0, 0); - - entry->ent.type = TRACE_KMEM_FREE; - entry->type_id = type_id; - entry->call_site = call_site; - entry->ptr = ptr; - - if (!filter_check_discard(call, entry, tr->buffer, event)) - ring_buffer_unlock_commit(tr->buffer, event); - - trace_wake_up(); -} - -static void kmemtrace_kmalloc(void *ignore, - unsigned long call_site, - const void *ptr, - size_t bytes_req, - size_t bytes_alloc, - gfp_t gfp_flags) -{ - kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr, - bytes_req, bytes_alloc, gfp_flags, -1); -} - -static void kmemtrace_kmem_cache_alloc(void *ignore, - unsigned long call_site, - const void *ptr, - size_t bytes_req, - size_t bytes_alloc, - 
gfp_t gfp_flags) -{ - kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr, - bytes_req, bytes_alloc, gfp_flags, -1); -} - -static void kmemtrace_kmalloc_node(void *ignore, - unsigned long call_site, - const void *ptr, - size_t bytes_req, - size_t bytes_alloc, - gfp_t gfp_flags, - int node) -{ - kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr, - bytes_req, bytes_alloc, gfp_flags, node); -} - -static void kmemtrace_kmem_cache_alloc_node(void *ignore, - unsigned long call_site, - const void *ptr, - size_t bytes_req, - size_t bytes_alloc, - gfp_t gfp_flags, - int node) -{ - kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr, - bytes_req, bytes_alloc, gfp_flags, node); -} - -static void -kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr) -{ - kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr); -} - -static void kmemtrace_kmem_cache_free(void *ignore, - unsigned long call_site, const void *ptr) -{ - kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr); -} - -static int kmemtrace_start_probes(void) -{ - int err; - - err = register_trace_kmalloc(kmemtrace_kmalloc, NULL); - if (err) - return err; - err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL); - if (err) - return err; - err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL); - if (err) - return err; - err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL); - if (err) - return err; - err = register_trace_kfree(kmemtrace_kfree, NULL); - if (err) - return err; - err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL); - - return err; -} - -static void kmemtrace_stop_probes(void) -{ - unregister_trace_kmalloc(kmemtrace_kmalloc, NULL); - unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL); - unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL); - unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL); - unregister_trace_kfree(kmemtrace_kfree, NULL); - unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL); -} - -static int kmem_trace_init(struct trace_array *tr) -{ - kmemtrace_array = tr; - - tracing_reset_online_cpus(tr); - - kmemtrace_start_probes(); - - return 0; -} - -static void kmem_trace_reset(struct trace_array *tr) -{ - kmemtrace_stop_probes(); -} - -static void kmemtrace_headers(struct seq_file *s) -{ - /* Don't need headers for the original kmemtrace output */ - if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) - return; - - seq_printf(s, "#\n"); - seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS " - " POINTER NODE CALLER\n"); - seq_printf(s, "# FREE | | | | " - " | | | |\n"); - seq_printf(s, "# |\n\n"); -} - -/* - * The following functions give the original output from kmemtrace, - * plus the origin CPU, since reordering occurs in-kernel now. 
- */ - -#define KMEMTRACE_USER_ALLOC 0 -#define KMEMTRACE_USER_FREE 1 - -struct kmemtrace_user_event { - u8 event_id; - u8 type_id; - u16 event_size; - u32 cpu; - u64 timestamp; - unsigned long call_site; - unsigned long ptr; -}; - -struct kmemtrace_user_event_alloc { - size_t bytes_req; - size_t bytes_alloc; - unsigned gfp_flags; - int node; -}; - -static enum print_line_t -kmemtrace_print_alloc(struct trace_iterator *iter, int flags, - struct trace_event *event) -{ - struct trace_seq *s = &iter->seq; - struct kmemtrace_alloc_entry *entry; - int ret; - - trace_assign_type(entry, iter->ent); - - ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu " - "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n", - entry->type_id, (void *)entry->call_site, (unsigned long)entry->ptr, - (unsigned long)entry->bytes_req, (unsigned long)entry->bytes_alloc, - (unsigned long)entry->gfp_flags, entry->node); - - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - return TRACE_TYPE_HANDLED; -} - -static enum print_line_t -kmemtrace_print_free(struct trace_iterator *iter, int flags, - struct trace_event *event) -{ - struct trace_seq *s = &iter->seq; - struct kmemtrace_free_entry *entry; - int ret; - - trace_assign_type(entry, iter->ent); - - ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n", - entry->type_id, (void *)entry->call_site, - (unsigned long)entry->ptr); - - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - return TRACE_TYPE_HANDLED; -} - -static enum print_line_t -kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags, - struct trace_event *event) -{ - struct trace_seq *s = &iter->seq; - struct kmemtrace_alloc_entry *entry; - struct kmemtrace_user_event *ev; - struct kmemtrace_user_event_alloc *ev_alloc; - - trace_assign_type(entry, iter->ent); - - ev = trace_seq_reserve(s, sizeof(*ev)); - if (!ev) - return TRACE_TYPE_PARTIAL_LINE; - - ev->event_id = KMEMTRACE_USER_ALLOC; - ev->type_id = entry->type_id; - ev->event_size = sizeof(*ev) + sizeof(*ev_alloc); - ev->cpu = iter->cpu; - ev->timestamp = iter->ts; - ev->call_site = entry->call_site; - ev->ptr = (unsigned long)entry->ptr; - - ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc)); - if (!ev_alloc) - return TRACE_TYPE_PARTIAL_LINE; - - ev_alloc->bytes_req = entry->bytes_req; - ev_alloc->bytes_alloc = entry->bytes_alloc; - ev_alloc->gfp_flags = entry->gfp_flags; - ev_alloc->node = entry->node; - - return TRACE_TYPE_HANDLED; -} - -static enum print_line_t -kmemtrace_print_free_user(struct trace_iterator *iter, int flags, - struct trace_event *event) -{ - struct trace_seq *s = &iter->seq; - struct kmemtrace_free_entry *entry; - struct kmemtrace_user_event *ev; - - trace_assign_type(entry, iter->ent); - - ev = trace_seq_reserve(s, sizeof(*ev)); - if (!ev) - return TRACE_TYPE_PARTIAL_LINE; - - ev->event_id = KMEMTRACE_USER_FREE; - ev->type_id = entry->type_id; - ev->event_size = sizeof(*ev); - ev->cpu = iter->cpu; - ev->timestamp = iter->ts; - ev->call_site = entry->call_site; - ev->ptr = (unsigned long)entry->ptr; - - return TRACE_TYPE_HANDLED; -} - -/* The two other following provide a more minimalistic output */ -static enum print_line_t -kmemtrace_print_alloc_compress(struct trace_iterator *iter) -{ - struct kmemtrace_alloc_entry *entry; - struct trace_seq *s = &iter->seq; - int ret; - - trace_assign_type(entry, iter->ent); - - /* Alloc entry */ - ret = trace_seq_printf(s, " + "); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - /* Type */ - switch (entry->type_id) { - case KMEMTRACE_TYPE_KMALLOC: - ret = 
trace_seq_printf(s, "K "); - break; - case KMEMTRACE_TYPE_CACHE: - ret = trace_seq_printf(s, "C "); - break; - case KMEMTRACE_TYPE_PAGES: - ret = trace_seq_printf(s, "P "); - break; - default: - ret = trace_seq_printf(s, "? "); - } - - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - /* Requested */ - ret = trace_seq_printf(s, "%4zu ", entry->bytes_req); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - /* Allocated */ - ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - /* Flags - * TODO: would be better to see the name of the GFP flag names - */ - ret = trace_seq_printf(s, "%08x ", entry->gfp_flags); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - /* Pointer to allocated */ - ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - /* Node and call site*/ - ret = trace_seq_printf(s, "%4d %pf\n", entry->node, - (void *)entry->call_site); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - return TRACE_TYPE_HANDLED; -} - -static enum print_line_t -kmemtrace_print_free_compress(struct trace_iterator *iter) -{ - struct kmemtrace_free_entry *entry; - struct trace_seq *s = &iter->seq; - int ret; - - trace_assign_type(entry, iter->ent); - - /* Free entry */ - ret = trace_seq_printf(s, " - "); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - /* Type */ - switch (entry->type_id) { - case KMEMTRACE_TYPE_KMALLOC: - ret = trace_seq_printf(s, "K "); - break; - case KMEMTRACE_TYPE_CACHE: - ret = trace_seq_printf(s, "C "); - break; - case KMEMTRACE_TYPE_PAGES: - ret = trace_seq_printf(s, "P "); - break; - default: - ret = trace_seq_printf(s, "? "); - } - - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - /* Skip requested/allocated/flags */ - ret = trace_seq_printf(s, " "); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - /* Pointer to allocated */ - ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - /* Skip node and print call site*/ - ret = trace_seq_printf(s, " %pf\n", (void *)entry->call_site); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - return TRACE_TYPE_HANDLED; -} - -static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter) -{ - struct trace_entry *entry = iter->ent; - - if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) - return TRACE_TYPE_UNHANDLED; - - switch (entry->type) { - case TRACE_KMEM_ALLOC: - return kmemtrace_print_alloc_compress(iter); - case TRACE_KMEM_FREE: - return kmemtrace_print_free_compress(iter); - default: - return TRACE_TYPE_UNHANDLED; - } -} - -static struct trace_event_functions kmem_trace_alloc_funcs = { - .trace = kmemtrace_print_alloc, - .binary = kmemtrace_print_alloc_user, -}; - -static struct trace_event kmem_trace_alloc = { - .type = TRACE_KMEM_ALLOC, - .funcs = &kmem_trace_alloc_funcs, -}; - -static struct trace_event_functions kmem_trace_free_funcs = { - .trace = kmemtrace_print_free, - .binary = kmemtrace_print_free_user, -}; - -static struct trace_event kmem_trace_free = { - .type = TRACE_KMEM_FREE, - .funcs = &kmem_trace_free_funcs, -}; - -static struct tracer kmem_tracer __read_mostly = { - .name = "kmemtrace", - .init = kmem_trace_init, - .reset = kmem_trace_reset, - .print_line = kmemtrace_print_line, - .print_header = kmemtrace_headers, - .flags = &kmem_tracer_flags -}; - -void kmemtrace_init(void) -{ - /* earliest opportunity to start kmem tracing */ -} - -static int __init init_kmem_tracer(void) -{ - if (!register_ftrace_event(&kmem_trace_alloc)) { - 
pr_warning("Warning: could not register kmem events\n"); - return 1; - } - - if (!register_ftrace_event(&kmem_trace_free)) { - pr_warning("Warning: could not register kmem events\n"); - return 1; - } - - if (register_tracer(&kmem_tracer) != 0) { - pr_warning("Warning: could not register the kmem tracer\n"); - return 1; - } - - return 0; -} -device_initcall(init_kmem_tracer); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 75a5e80..075cd2e 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -30,19 +29,12 @@ enum trace_type { TRACE_GRAPH_RET, TRACE_GRAPH_ENT, TRACE_USER_STACK, - TRACE_KMEM_ALLOC, - TRACE_KMEM_FREE, TRACE_BLK, TRACE_KSYM, __TRACE_LAST_TYPE, }; -enum kmemtrace_type_id { - KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */ - KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */ - KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */ -}; #undef __field #define __field(type, item) type item; @@ -208,10 +200,6 @@ extern void __ftrace_bad_type(void); TRACE_GRAPH_ENT); \ IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ TRACE_GRAPH_RET); \ - IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ - TRACE_KMEM_ALLOC); \ - IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ - TRACE_KMEM_FREE); \ IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\ __ftrace_bad_type(); \ } while (0) diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index c293364..13abc15 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -291,41 +291,6 @@ FTRACE_ENTRY(branch, trace_branch, __entry->func, __entry->file, __entry->correct) ); -FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry, - - TRACE_KMEM_ALLOC, - - F_STRUCT( - __field( enum kmemtrace_type_id, type_id ) - __field( unsigned long, call_site ) - __field( const void *, ptr ) - __field( size_t, bytes_req ) - __field( size_t, bytes_alloc ) - __field( gfp_t, gfp_flags ) - __field( int, node ) - ), - - F_printk("type:%u call_site:%lx ptr:%p req:%zi alloc:%zi" - " flags:%x node:%d", - __entry->type_id, __entry->call_site, __entry->ptr, - __entry->bytes_req, __entry->bytes_alloc, - __entry->gfp_flags, __entry->node) -); - -FTRACE_ENTRY(kmem_free, kmemtrace_free_entry, - - TRACE_KMEM_FREE, - - F_STRUCT( - __field( enum kmemtrace_type_id, type_id ) - __field( unsigned long, call_site ) - __field( const void *, ptr ) - ), - - F_printk("type:%u call_site:%lx ptr:%p", - __entry->type_id, __entry->call_site, __entry->ptr) -); - FTRACE_ENTRY(ksym_trace, ksym_trace_entry, TRACE_KSYM, -- cgit v1.1 From 551d55a944b143ef26fbd482d1c463199d6f65cf Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Sat, 17 Apr 2010 08:48:42 -0400 Subject: tree/tiny rcu: Add debug RCU head objects Helps finding racy users of call_rcu(), which results in hangs because list entries are overwritten and/or skipped. Changelog since v4: - Bissectability is now OK - Now generate a WARN_ON_ONCE() for non-initialized rcu_head passed to call_rcu(). Statically initialized objects are detected with object_is_static(). - Rename rcu_head_init_on_stack to init_rcu_head_on_stack. - Remove init_rcu_head() completely. Changelog since v3: - Include comments from Lai Jiangshan This new patch version is based on the debugobjects with the newly introduced "active state" tracker. Non-initialized entries are all considered as "statically initialized". 
An activation fixup (triggered by call_rcu()) takes care of performing the debug object initialization without issuing any warning. Since we cannot increase the size of struct rcu_head, I don't see much room to put an identifier for statically initialized rcu_head structures. So for now, we have to live without "activation without explicit init" detection. But the main purpose of this debug option is to detect double-activations (double call_rcu() use of a rcu_head before the callback is executed), which is correctly addressed here. This also detects potential internal RCU callback corruption, which would cause the callbacks to be executed twice. Signed-off-by: Mathieu Desnoyers CC: David S. Miller CC: "Paul E. McKenney" CC: akpm@linux-foundation.org CC: mingo@elte.hu CC: laijs@cn.fujitsu.com CC: dipankar@in.ibm.com CC: josh@joshtriplett.org CC: dvhltc@us.ibm.com CC: niv@us.ibm.com CC: tglx@linutronix.de CC: peterz@infradead.org CC: rostedt@goodmis.org CC: Valdis.Kletnieks@vt.edu CC: dhowells@redhat.com CC: eric.dumazet@gmail.com CC: Alexey Dobriyan Signed-off-by: Paul E. McKenney Reviewed-by: Lai Jiangshan --- kernel/rcupdate.c | 160 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/rcutiny.c | 2 + kernel/rcutree.c | 2 + 3 files changed, 164 insertions(+) (limited to 'kernel') diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 72a8dc9..4d16983 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -114,3 +114,163 @@ int rcu_my_thread_group_empty(void) } EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty); #endif /* #ifdef CONFIG_PROVE_RCU */ + +#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD +static inline void debug_init_rcu_head(struct rcu_head *head) +{ + debug_object_init(head, &rcuhead_debug_descr); +} + +static inline void debug_rcu_head_free(struct rcu_head *head) +{ + debug_object_free(head, &rcuhead_debug_descr); +} + +/* + * fixup_init is called when: + * - an active object is initialized + */ +static int rcuhead_fixup_init(void *addr, enum debug_obj_state state) +{ + struct rcu_head *head = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + /* + * Ensure that queued callbacks are all executed. + * If we detect that we are nested in a RCU read-side critical + * section, we should simply fail, otherwise we would deadlock. + */ + if (rcu_preempt_depth() != 0 || preempt_count() != 0 || + irqs_disabled()) { + WARN_ON(1); + return 0; + } + rcu_barrier(); + rcu_barrier_sched(); + rcu_barrier_bh(); + debug_object_init(head, &rcuhead_debug_descr); + return 1; + default: + return 0; + } +} + +/* + * fixup_activate is called when: + * - an active object is activated + * - an unknown object is activated (might be a statically initialized object) + * Activation is performed internally by call_rcu(). + */ +static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state) +{ + struct rcu_head *head = addr; + + switch (state) { + + case ODEBUG_STATE_NOTAVAILABLE: + /* + * This is not really a fixup. We just make sure that it is + * tracked in the object tracker. + */ + debug_object_init(head, &rcuhead_debug_descr); + debug_object_activate(head, &rcuhead_debug_descr); + return 0; + + case ODEBUG_STATE_ACTIVE: + /* + * Ensure that queued callbacks are all executed. + * If we detect that we are nested in a RCU read-side critical + * section, we should simply fail, otherwise we would deadlock. 
+ */ + if (rcu_preempt_depth() != 0 || preempt_count() != 0 || + irqs_disabled()) { + WARN_ON(1); + return 0; + } + rcu_barrier(); + rcu_barrier_sched(); + rcu_barrier_bh(); + debug_object_activate(head, &rcuhead_debug_descr); + return 1; + default: + return 0; + } +} + +/* + * fixup_free is called when: + * - an active object is freed + */ +static int rcuhead_fixup_free(void *addr, enum debug_obj_state state) +{ + struct rcu_head *head = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + /* + * Ensure that queued callbacks are all executed. + * If we detect that we are nested in a RCU read-side critical + * section, we should simply fail, otherwise we would deadlock. + */ +#ifndef CONFIG_PREEMPT + WARN_ON(1); + return 0; +#else + if (rcu_preempt_depth() != 0 || preempt_count() != 0 || + irqs_disabled()) { + WARN_ON(1); + return 0; + } + rcu_barrier(); + rcu_barrier_sched(); + rcu_barrier_bh(); + debug_object_free(head, &rcuhead_debug_descr); + return 1; +#endif + default: + return 0; + } +} + +/** + * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects + * @head: pointer to rcu_head structure to be initialized + * + * This function informs debugobjects of a new rcu_head structure that + * has been allocated as an auto variable on the stack. This function + * is not required for rcu_head structures that are statically defined or + * that are dynamically allocated on the heap. This function has no + * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds. + */ +void init_rcu_head_on_stack(struct rcu_head *head) +{ + debug_object_init_on_stack(head, &rcuhead_debug_descr); +} +EXPORT_SYMBOL_GPL(init_rcu_head_on_stack); + +/** + * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects + * @head: pointer to rcu_head structure to be initialized + * + * This function informs debugobjects that an on-stack rcu_head structure + * is about to go out of scope. As with init_rcu_head_on_stack(), this + * function is not required for rcu_head structures that are statically + * defined or that are dynamically allocated on the heap. Also as with + * init_rcu_head_on_stack(), this function has no effect for + * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds. 
+ */ +void destroy_rcu_head_on_stack(struct rcu_head *head) +{ + debug_object_free(head, &rcuhead_debug_descr); +} +EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack); + +struct debug_obj_descr rcuhead_debug_descr = { + .name = "rcu_head", + .fixup_init = rcuhead_fixup_init, + .fixup_activate = rcuhead_fixup_activate, + .fixup_free = rcuhead_fixup_free, +}; +EXPORT_SYMBOL_GPL(rcuhead_debug_descr); +#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index 38729d3..196ec02 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c @@ -169,6 +169,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) while (list) { next = list->next; prefetch(next); + debug_rcu_head_unqueue(list); list->func(list); list = next; } @@ -211,6 +212,7 @@ static void __call_rcu(struct rcu_head *head, { unsigned long flags; + debug_rcu_head_queue(head); head->func = func; head->next = NULL; diff --git a/kernel/rcutree.c b/kernel/rcutree.c index d443734..d5bc439 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -1112,6 +1112,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) while (list) { next = list->next; prefetch(next); + debug_rcu_head_unqueue(list); list->func(list); list = next; if (++count >= rdp->blimit) @@ -1388,6 +1389,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), unsigned long flags; struct rcu_data *rdp; + debug_rcu_head_queue(head); head->func = func; head->next = NULL; -- cgit v1.1 From a25909a4d4a29e272f953e12595bf2f04a292dbd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 13 May 2010 12:32:28 -0700 Subject: lockdep: Add an in_workqueue_context() lockdep-based test function Some recent uses of RCU make use of workqueues. In these uses, execution within the context of a specific workqueue takes the place of the usual RCU read-side primitives such as rcu_read_lock(), and flushing of workqueues takes the place of the usual RCU grace-period primitives. Checking for correct use of rcu_dereference() in such cases requires a test of whether the code is executing in the context of a particular workqueue. This commit adds an in_workqueue_context() function that provides this test. This new function is only defined when lockdep is enabled, which allows it to be used as the second argument of rcu_dereference_check(). Signed-off-by: Paul E. McKenney --- kernel/workqueue.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 327d2de..59fef15 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -68,6 +68,21 @@ struct workqueue_struct { #endif }; +#ifdef CONFIG_LOCKDEP +/** + * in_workqueue_context() - in context of specified workqueue? + * @wq: the workqueue of interest + * + * Checks lockdep state to see if the current task is executing from + * within a workqueue item. This function exists only if lockdep is + * enabled. 
+ */ +int in_workqueue_context(struct workqueue_struct *wq) +{ + return lock_is_held(&wq->lockdep_map); +} +#endif + #ifdef CONFIG_DEBUG_OBJECTS_WORK static struct debug_obj_descr work_debug_descr; -- cgit v1.1 From 732bee7af3102cad811fb047dee8d15966efe569 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 11 Jun 2010 12:16:59 +0200 Subject: fix typos concerning "hierarchy" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Uwe Kleine-König Signed-off-by: Jiri Kosina --- kernel/cpuset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 9a50c5f..1a10978 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -105,7 +105,7 @@ struct cpuset { /* for custom sched domain */ int relax_domain_level; - /* used for walking a cpuset heirarchy */ + /* used for walking a cpuset hierarchy */ struct list_head stack_list; }; -- cgit v1.1 From 5c1469de7545a35a16ff2b902e217044a7d2f8a5 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 13 Jun 2010 03:28:03 +0000 Subject: user_ns: Introduce user_ns_map_uid and user_ns_map_gid. Define what happens when we view a uid from one user_namespace in another user_namespace. - If the user namespaces are the same no mapping is necessary. - For most cases of difference use overflowuid and overflowgid, the uid and gid currently used for 16bit apis when we have a 32bit uid that does not fit in 16bits. Effectively the situation is the same, we want to return a uid or gid that is not assigned to any user. - For the case when we happen to be mapping the uid or gid of the creator of the target user namespace use uid 0 and gid 0, as confusing that user with root is not a problem. Signed-off-by: Eric W. Biederman Acked-by: Serge E. Hallyn Signed-off-by: David S. Miller --- kernel/user_namespace.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) (limited to 'kernel') diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index b2d70d3..2591583 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -9,6 +9,7 @@ #include #include #include +#include #include /* @@ -82,3 +83,46 @@ void free_user_ns(struct kref *kref) schedule_work(&ns->destroyer); } EXPORT_SYMBOL(free_user_ns); + +uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid) +{ + struct user_namespace *tmp; + + if (likely(to == cred->user->user_ns)) + return uid; + + + /* Is cred->user the creator of the target user_ns + * or the creator of one of it's parents? + */ + for ( tmp = to; tmp != &init_user_ns; + tmp = tmp->creator->user_ns ) { + if (cred->user == tmp->creator) { + return (uid_t)0; + } + } + + /* No useful relationship so no mapping */ + return overflowuid; +} + +gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid) +{ + struct user_namespace *tmp; + + if (likely(to == cred->user->user_ns)) + return gid; + + /* Is cred->user the creator of the target user_ns + * or the creator of one of it's parents?
+ */ + for ( tmp = to; tmp != &init_user_ns; + tmp = tmp->creator->user_ns ) { + if (cred->user == tmp->creator) { + return (gid_t)0; + } + } + + /* No useful relationship so no mapping */ + return overflowgid; +} -- cgit v1.1 From 694f5a1112959a6996cabdb6f8d3003e87dac8a7 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Thu, 10 Jun 2010 09:03:37 +1000 Subject: sched: Fix fix_small_capacity The CPU power test is the wrong way around in fix_small_capacity. This was due to a small change made between the patch posted on lkml and what was taken upstream. This patch fixes asymmetric packing for POWER7. Signed-off-by: Michael Neuling Signed-off-by: Peter Zijlstra LKML-Reference: <12629.1276124617@neuling.org> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 593424f..e82c572 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2354,7 +2354,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) /* * If ~90% of the cpu_power is still there, we're good. */ - if (group->cpu_power * 32 < group->cpu_power_orig * 29) + if (group->cpu_power * 32 > group->cpu_power_orig * 29) return 1; return 0; -- cgit v1.1 From b6b12294405e6ec029e627c49adf3193829a2685 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Thu, 10 Jun 2010 12:06:21 +1000 Subject: sched: Fix comments to make them DocBook happy Docbook fails in sched_fair.c due to comments added in the asymmetric packing patch series. This fixes these errors. No code changes. Signed-off-by: Michael Neuling Signed-off-by: Peter Zijlstra LKML-Reference: <24737.1276135581@neuling.org> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index e82c572..5e8f98c 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2462,7 +2462,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, * @sd: sched_domain whose statistics are to be checked * @sds: sched_domain statistics * @sg: sched_group candidate to be checked for being the busiest - * @sds: sched_group statistics + * @sgs: sched_group statistics + * @this_cpu: the current cpu * * Determine if @sg is a busier group than the previously selected * busiest group. @@ -2588,13 +2589,13 @@ int __weak arch_sd_sibiling_asym_packing(void) * assuming lower CPU number will be equivalent to lower a SMT thread * number. * + * Returns 1 when packing is required and a task should be moved to + * this CPU. The amount of the imbalance is returned in *imbalance. + * * @sd: The sched_domain whose packing is to be checked. * @sds: Statistics of the sched_domain which is to be packed * @this_cpu: The cpu at whose sched_domain we're performing load-balance. * @imbalance: returns amount of imbalanced due to packing. - * - * Returns 1 when packing is required and a task should be moved to - * this CPU. The amount of the imbalance is returned in *imbalance. */ static int check_asym_packing(struct sched_domain *sd, struct sd_lb_stats *sds, -- cgit v1.1 From a44702e8858a071aa0f2365113ea4a2e51c8b575 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 11 Jun 2010 01:09:44 +0200 Subject: sched: __sched_setscheduler: Read the RLIMIT_RTPRIO value lockless __sched_setscheduler() takes lock_task_sighand() to access task->signal. This is not needed since ea6d290c, ->signal can't go away.
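To illustrate the pattern this change relies on, here is a minimal, hypothetical sketch (not taken from the patch): once ->signal is pinned to the task_struct, an rlimit can be read through task_rlimit() without any sighand locking. The helper name below is made up for illustration.

#include <linux/sched.h>
#include <linux/resource.h>

/* Hypothetical helper, illustration only: read a task's RT-priority rlimit
 * without taking ->siglock. This is safe once task->signal is guaranteed to
 * stay around for the lifetime of the task_struct. */
static unsigned long rt_prio_limit(const struct task_struct *p)
{
	return task_rlimit(p, RLIMIT_RTPRIO);
}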
Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra LKML-Reference: <20100610230944.GA25903@redhat.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 132950b..b4427cc 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4600,12 +4600,8 @@ recheck: */ if (user && !capable(CAP_SYS_NICE)) { if (rt_policy(policy)) { - unsigned long rlim_rtprio; - - if (!lock_task_sighand(p, &flags)) - return -ESRCH; - rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); - unlock_task_sighand(p, &flags); + unsigned long rlim_rtprio = + task_rlimit(p, RLIMIT_RTPRIO); /* can't set/change the rt policy */ if (policy != p->policy && !rlim_rtprio) -- cgit v1.1 From c32b4fce799d3a6157df9048d03e429956c58818 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 11 Jun 2010 01:09:48 +0200 Subject: sched: task_tick_rt: Remove the obsolete ->signal != NULL check Remove the obsolete ->signal != NULL check in watchdog(). Since ea6d290c ->signal can't be NULL. Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra LKML-Reference: <20100610230948.GA25911@redhat.com> Signed-off-by: Ingo Molnar --- kernel/sched_rt.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 8afb953..d10c80e 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1663,9 +1663,6 @@ static void watchdog(struct rq *rq, struct task_struct *p) { unsigned long soft, hard; - if (!p->signal) - return; - /* max may change after cur was read, this will be fixed next tick */ soft = task_rlimit(p, RLIMIT_RTTIME); hard = task_rlimit_max(p, RLIMIT_RTTIME); -- cgit v1.1 From 48286d5088a3ba76de40a6b70221632a49cab7a1 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 11 Jun 2010 01:09:52 +0200 Subject: sched: Remove the obsolete exit_state/signal hacks account_group_xxx() functions check ->exit_state to ensure that current->signal is valid and can't go away. This is not needed since ea6d290c, task->signal is pinned to task_struct. The comment and another hack in account_group_exec_runtime() refers to task_rq_unlock_wait() which was already removed by b7b8ff63. 
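A hedged sketch of the simplified shape these accounting helpers take after the change; the function name is hypothetical and the locking detail just mirrors the existing thread_group_cputimer usage, so treat it as illustrative only.

#include <linux/sched.h>

/* Illustration only: with signal_struct pinned to task_struct, the hot
 * accounting path dereferences tsk->signal directly instead of first
 * checking ->exit_state or a NULL ->signal. */
static inline void charge_group_utime(struct task_struct *tsk, cputime_t delta)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.utime = cputime_add(cputimer->cputime.utime, delta);
	spin_unlock(&cputimer->lock);
}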
Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra LKML-Reference: <20100610230952.GA25914@redhat.com> Signed-off-by: Ingo Molnar --- kernel/sched_stats.h | 27 +++------------------------ 1 file changed, 3 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 32d2bd4..25c2f96 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -295,13 +295,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) static inline void account_group_user_time(struct task_struct *tsk, cputime_t cputime) { - struct thread_group_cputimer *cputimer; - - /* tsk == current, ensure it is safe to use ->signal */ - if (unlikely(tsk->exit_state)) - return; - - cputimer = &tsk->signal->cputimer; + struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; if (!cputimer->running) return; @@ -325,13 +319,7 @@ static inline void account_group_user_time(struct task_struct *tsk, static inline void account_group_system_time(struct task_struct *tsk, cputime_t cputime) { - struct thread_group_cputimer *cputimer; - - /* tsk == current, ensure it is safe to use ->signal */ - if (unlikely(tsk->exit_state)) - return; - - cputimer = &tsk->signal->cputimer; + struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; if (!cputimer->running) return; @@ -355,16 +343,7 @@ static inline void account_group_system_time(struct task_struct *tsk, static inline void account_group_exec_runtime(struct task_struct *tsk, unsigned long long ns) { - struct thread_group_cputimer *cputimer; - struct signal_struct *sig; - - sig = tsk->signal; - /* see __exit_signal()->task_rq_unlock_wait() */ - barrier(); - if (unlikely(!sig)) - return; - - cputimer = &sig->cputimer; + struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; if (!cputimer->running) return; -- cgit v1.1 From bfac7009180901f57f20a73c53c3e57b1ce75a1b Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 11 Jun 2010 01:09:56 +0200 Subject: sched: thread_group_cputime: Simplify, document the "alive" check thread_group_cputime() looks as if it is rcu-safe, but in fact this was wrong until ea6d290c which pins task->signal to task_struct. It checks ->sighand != NULL under rcu, but this can't help if ->signal can go away. Fortunately the caller either holds ->siglock, or it is fastpath_timer_check() which uses current and checks exit_state == 0. - Since ea6d290c commit tsk->signal is stable, we can read it first and avoid the initialization from INIT_CPUTIME. - Even if tsk->signal is always valid, we still have to check it is safe to use next_thread() under rcu_read_lock(). Currently the code checks ->sighand != NULL, change it to use pid_alive() which is commonly used to ensure the task wasn't unhashed before we take rcu_read_lock(). Add the comment to explain this check. - Change the main loop to use the while_each_thread() helper. 
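The resulting iteration pattern, sketched out of context below; the summing function is hypothetical and only meant to show the pid_alive() check plus the while_each_thread() walk under rcu_read_lock().

#include <linux/sched.h>
#include <linux/rcupdate.h>

/* Hypothetical example: sum the exec runtime of every thread in tsk's group.
 * pid_alive() confirms tsk is still hashed, so the ->thread_group list can be
 * trusted while rcu_read_lock() is held. */
static u64 group_exec_runtime(struct task_struct *tsk)
{
	struct task_struct *t = tsk;
	u64 sum = 0;

	rcu_read_lock();
	if (!pid_alive(tsk))
		goto out;
	do {
		sum += t->se.sum_exec_runtime;
	} while_each_thread(tsk, t);
out:
	rcu_read_unlock();
	return sum;
}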
Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra LKML-Reference: <20100610230956.GA25921@redhat.com> Signed-off-by: Ingo Molnar --- kernel/posix-cpu-timers.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 9829646..bf2a650 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -232,31 +232,24 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) { - struct sighand_struct *sighand; - struct signal_struct *sig; + struct signal_struct *sig = tsk->signal; struct task_struct *t; - *times = INIT_CPUTIME; + times->utime = sig->utime; + times->stime = sig->stime; + times->sum_exec_runtime = sig->sum_sched_runtime; rcu_read_lock(); - sighand = rcu_dereference(tsk->sighand); - if (!sighand) + /* make sure we can trust tsk->thread_group list */ + if (!likely(pid_alive(tsk))) goto out; - sig = tsk->signal; - t = tsk; do { times->utime = cputime_add(times->utime, t->utime); times->stime = cputime_add(times->stime, t->stime); times->sum_exec_runtime += t->se.sum_exec_runtime; - - t = next_thread(t); - } while (t != tsk); - - times->utime = cputime_add(times->utime, sig->utime); - times->stime = cputime_add(times->stime, sig->stime); - times->sum_exec_runtime += sig->sum_sched_runtime; + } while_each_thread(tsk, t); out: rcu_read_unlock(); } -- cgit v1.1 From 0bdd2ed4138ec04e09b4f8165981efc99e439f55 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 11 Jun 2010 01:10:18 +0200 Subject: sched: run_posix_cpu_timers: Don't check ->exit_state, use lock_task_sighand() run_posix_cpu_timers() doesn't work if current has already passed exit_notify(). This was needed to prevent the races with do_wait(). Since ea6d290c ->signal is always valid and can't go away. We can remove the "tsk->exit_state == 0" in fastpath_timer_check() and convert run_posix_cpu_timers() to use lock_task_sighand(). Note: it makes sense to take group_leader's sighand instead, the sub-thread still uses CPU after release_task(). But we need more changes to do this. 
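For reference, the locking idiom adopted here in a stripped-down, hypothetical form: lock_task_sighand() returns NULL once the task has been released, so the caller simply gives up instead of testing ->exit_state by hand.

#include <linux/sched.h>

/* Hypothetical caller, illustration only: take the per-task sighand lock if
 * the task is still around, do the work, then drop it again. */
static void with_sighand_locked(struct task_struct *tsk)
{
	unsigned long flags;

	if (!lock_task_sighand(tsk, &flags))
		return;		/* task already released, nothing to do */

	/* tsk->signal and tsk->sighand are stable in this section */

	unlock_task_sighand(tsk, &flags);
}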
Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra LKML-Reference: <20100610231018.GA25942@redhat.com> Signed-off-by: Ingo Molnar --- kernel/posix-cpu-timers.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index bf2a650..d5dbef5 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -1272,10 +1272,6 @@ static inline int fastpath_timer_check(struct task_struct *tsk) { struct signal_struct *sig; - /* tsk == current, ensure it is safe to use ->signal/sighand */ - if (unlikely(tsk->exit_state)) - return 0; - if (!task_cputime_zero(&tsk->cputime_expires)) { struct task_cputime task_sample = { .utime = tsk->utime, @@ -1308,6 +1304,7 @@ void run_posix_cpu_timers(struct task_struct *tsk) { LIST_HEAD(firing); struct k_itimer *timer, *next; + unsigned long flags; BUG_ON(!irqs_disabled()); @@ -1318,7 +1315,8 @@ void run_posix_cpu_timers(struct task_struct *tsk) if (!fastpath_timer_check(tsk)) return; - spin_lock(&tsk->sighand->siglock); + if (!lock_task_sighand(tsk, &flags)) + return; /* * Here we take off tsk->signal->cpu_timers[N] and * tsk->cpu_timers[N] all the timers that are firing, and @@ -1340,7 +1338,7 @@ void run_posix_cpu_timers(struct task_struct *tsk) * that gets the timer lock before we do will give it up and * spin until we've taken care of that timer below. */ - spin_unlock(&tsk->sighand->siglock); + unlock_task_sighand(tsk, &flags); /* * Now that all the timers on our list have the firing flag, -- cgit v1.1 From 8d1f431cbec115a780cd551ab1b4955c125f8d31 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 11 Jun 2010 20:04:46 +0200 Subject: sched: Fix the racy usage of thread_group_cputimer() in fastpath_timer_check() fastpath_timer_check()->thread_group_cputimer() is racy and unneeded. It is racy because another thread can clear ->running before thread_group_cputimer() takes cputimer->lock. In this case thread_group_cputimer() will set ->running = true again and call thread_group_cputime(). But since we do not hold tasklist or siglock, we can race with fork/exit and copy the wrong results into cputimer->cputime. It is unneeded because if ->running == true we can just use the numbers in cputimer->cputime we already have. Change fastpath_timer_check() to copy cputimer->cputime into the local variable under cputimer->lock. We do not re-check ->running under cputimer->lock, run_posix_cpu_timers() does this check later. Note: we can add more optimizations on top of this change. 
Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra LKML-Reference: <20100611180446.GA13025@redhat.com> Signed-off-by: Ingo Molnar --- kernel/posix-cpu-timers.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index d5dbef5..f66bdd3 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -1287,7 +1287,10 @@ static inline int fastpath_timer_check(struct task_struct *tsk) if (sig->cputimer.running) { struct task_cputime group_sample; - thread_group_cputimer(tsk, &group_sample); + spin_lock(&sig->cputimer.lock); + group_sample = sig->cputimer.cputime; + spin_unlock(&sig->cputimer.lock); + if (task_cputime_expired(&group_sample, &sig->cputime_expires)) return 1; } -- cgit v1.1 From 0b2e918aa99fe6c3b8f163aa323a275ad8577828 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 21 Jun 2010 23:53:31 +0200 Subject: sched, cpuset: Drop __cpuexit from cpu hotplug callbacks Commit 3a101d05 (sched: adjust when cpu_active and cpuset configurations are updated during cpu on/offlining) added hotplug notifiers marked with __cpuexit; however, ia64 drops text in __cpuexit during link unlike x86. This means that functions which are referenced during init but used only for cpu hot unplugging afterwards shouldn't be marked with __cpuexit. Drop __cpuexit from those functions. Reported-by: Tony Luck Signed-off-by: Tejun Heo Acked-by: Tony Luck Cc: Peter Zijlstra LKML-Reference: <4C1FDF5B.1040301@kernel.org> Signed-off-by: Ingo Molnar --- kernel/cpuset.c | 2 +- kernel/sched.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 05727dc..7146793 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -2118,7 +2118,7 @@ static void scan_for_empty_cpusets(struct cpuset *root) * Called within get_online_cpus(). Needs to call cgroup_lock() * before calling generate_sched_domains(). */ -void __cpuexit cpuset_update_active_cpus(void) +void cpuset_update_active_cpus(void) { struct sched_domain_attr *attr; cpumask_var_t *doms; diff --git a/kernel/sched.c b/kernel/sched.c index b4427cc..9064e7d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7477,8 +7477,8 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) * disabled, cpuset_update_active_cpus() becomes a simple wrapper * around partition_sched_domains(). */ -static int __cpuexit cpuset_cpu_active(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, + void *hcpu) { switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: @@ -7490,8 +7490,8 @@ static int __cpuexit cpuset_cpu_active(struct notifier_block *nfb, } } -static int __cpuexit cpuset_cpu_inactive(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, + void *hcpu) { switch (action & ~CPU_TASKS_FROZEN) { case CPU_DOWN_PREPARE: -- cgit v1.1 From f7136c5150c29846d7a1d09109449d96b2f63445 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Tue, 15 Jun 2010 11:34:34 +0530 Subject: hw_breakpoints: Allow arch-specific cleanup before breakpoint unregistration Certain architectures (such as PowerPC) have a need to clean up data structures before a breakpoint is unregistered. This introduces an arch-specific hook in release_bp_slot() along with a weak definition in the form of a stub function. 
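To show how an architecture is expected to hook in, here is a hypothetical override (not part of this patch): a non-weak definition in the arch's own hw_breakpoint.c wins over the __weak stub at link time and is then called from release_bp_slot().

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* Hypothetical arch-side definition, e.g. in arch/powerpc/kernel/hw_breakpoint.c.
 * Because the generic copy is marked __weak, this one is linked in instead and
 * runs during breakpoint unregistration. */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/* release any arch-private state that was tied to bp */
}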
Signed-off-by: K.Prasad Acked-by: Frederic Weisbecker Signed-off-by: Paul Mackerras --- kernel/hw_breakpoint.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'kernel') diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 7a56b22..71ed3ce 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -242,6 +242,17 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, } /* + * Function to perform processor-specific cleanup during unregistration + */ +__weak void arch_unregister_hw_breakpoint(struct perf_event *bp) +{ + /* + * A weak stub function here for those archs that don't define + * it inside arch/.../kernel/hw_breakpoint.c + */ +} + +/* * Contraints to check before allowing this new breakpoint counter: * * == Non-pinned counter == (Considered as pinned for now) @@ -339,6 +350,7 @@ void release_bp_slot(struct perf_event *bp) { mutex_lock(&nr_bp_mutex); + arch_unregister_hw_breakpoint(bp); __release_bp_slot(bp); mutex_unlock(&nr_bp_mutex); -- cgit v1.1 From 45a73372efe4a63f44aa2e1125d4a777c2fdc8d8 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 23 Jun 2010 23:00:37 +0200 Subject: hw_breakpoints: Fix per task breakpoint tracking Freeing a perf event can happen in several ways. A task calls perf_event_exit_task() right before exiting. This helper will detach all the events from the task context and queue their removal through free_event() if they are child tasks. The task also loses its context reference there. Releasing the breakpoint slot from the constraint table is made from free_event() that calls release_bp_slot(). We count the number of breakpoints this task is running by looking at the task's perf_event_ctxp and iterating through its attached events. But at this time, the reference to this context has been cleaned up already. So looking at the event->ctx instead of task->perf_event_ctxp to count the remaining breakpoints should solve the problem. At least it would for child breakpoints, but not for parent ones. If the parent exits before the child, it will remove all its events from the context but free_event() will be called later, on fd release time. And checking the number of breakpoints the task has attached to its context at this time is unreliable as all events have been removed from the context. To solve this, we keep track of the list of per task breakpoints. On top of it, we maintain our array of numbers of breakpoints used by the tasks. We use the context address as a task id. So, instead of looking at the number of events attached to a context, we walk through our list of per task breakpoints and count the number of breakpoints that use the same ctx than the one to be reserved or released from the constraint table, and update the count on top of this result. In the meantime it solves a bad refcounting, it also solves a warning, reported by Paul. 
Badness at /home/paulus/kernel/perf/kernel/hw_breakpoint.c:114 NIP: c0000000000cb470 LR: c0000000000cb46c CTR: c00000000032d9b8 REGS: c000000118e7b570 TRAP: 0700 Not tainted (2.6.35-rc3-perf-00008-g76b0f13 ) MSR: 9000000000029032 CR: 44004424 XER: 000fffff TASK = c0000001187dcad0[3143] 'perf' THREAD: c000000118e78000 CPU: 1 GPR00: c0000000000cb46c c000000118e7b7f0 c0000000009866a0 0000000000000020 GPR04: 0000000000000000 000000000000001d 0000000000000000 0000000000000001 GPR08: c0000000009bed68 c00000000086dff8 c000000000a5bf10 0000000000000001 GPR12: 0000000024004422 c00000000ffff200 0000000000000000 0000000000000000 GPR16: 0000000000000000 0000000000000000 0000000000000018 00000000101150f4 GPR20: 0000000010206b40 0000000000000000 0000000000000000 00000000101150f4 GPR24: c0000001199090c0 0000000000000001 0000000000000000 0000000000000001 GPR28: 0000000000000000 0000000000000000 c0000000008ec290 0000000000000000 NIP [c0000000000cb470] .task_bp_pinned+0x5c/0x12c LR [c0000000000cb46c] .task_bp_pinned+0x58/0x12c Call Trace: [c000000118e7b7f0] [c0000000000cb46c] .task_bp_pinned+0x58/0x12c (unreliable) [c000000118e7b8a0] [c0000000000cb584] .toggle_bp_task_slot+0x44/0xe4 [c000000118e7b940] [c0000000000cb6c8] .toggle_bp_slot+0xa4/0x164 [c000000118e7b9f0] [c0000000000cbafc] .release_bp_slot+0x44/0x6c [c000000118e7ba80] [c0000000000c4178] .bp_perf_event_destroy+0x10/0x24 [c000000118e7bb00] [c0000000000c4aec] .free_event+0x180/0x1bc [c000000118e7bbc0] [c0000000000c54c4] .perf_event_release_kernel+0x14c/0x170 Reported-by: Paul Mackerras Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Prasad Cc: Mahesh Salgaonkar Cc: Will Deacon Cc: Jason Wessel --- kernel/hw_breakpoint.c | 78 ++++++++++++++++++++++++++------------------------ 1 file changed, 41 insertions(+), 37 deletions(-) (limited to 'kernel') diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 7a56b22..e34d94d 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -62,6 +63,9 @@ static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]); static int nr_slots[TYPE_MAX]; +/* Keep track of the breakpoints attached to tasks */ +static LIST_HEAD(bp_task_head); + static int constraints_initialized; /* Gather the number of total pinned and un-pinned bp in a cpuset */ @@ -103,33 +107,21 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) return 0; } -static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type) +/* + * Count the number of breakpoints of the same type and same task. + * The given event must be not on the list. 
+ */ +static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type) { - struct perf_event_context *ctx = tsk->perf_event_ctxp; - struct list_head *list; - struct perf_event *bp; - unsigned long flags; + struct perf_event_context *ctx = bp->ctx; + struct perf_event *iter; int count = 0; - if (WARN_ONCE(!ctx, "No perf context for this task")) - return 0; - - list = &ctx->event_list; - - raw_spin_lock_irqsave(&ctx->lock, flags); - - /* - * The current breakpoint counter is not included in the list - * at the open() callback time - */ - list_for_each_entry(bp, list, event_entry) { - if (bp->attr.type == PERF_TYPE_BREAKPOINT) - if (find_slot_idx(bp) == type) - count += hw_breakpoint_weight(bp); + list_for_each_entry(iter, &bp_task_head, hw.bp_list) { + if (iter->ctx == ctx && find_slot_idx(iter) == type) + count += hw_breakpoint_weight(iter); } - raw_spin_unlock_irqrestore(&ctx->lock, flags); - return count; } @@ -149,7 +141,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, if (!tsk) slots->pinned += max_task_bp_pinned(cpu, type); else - slots->pinned += task_bp_pinned(tsk, type); + slots->pinned += task_bp_pinned(bp, type); slots->flexible = per_cpu(nr_bp_flexible[type], cpu); return; @@ -162,7 +154,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, if (!tsk) nr += max_task_bp_pinned(cpu, type); else - nr += task_bp_pinned(tsk, type); + nr += task_bp_pinned(bp, type); if (nr > slots->pinned) slots->pinned = nr; @@ -188,7 +180,7 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight) /* * Add a pinned breakpoint for the given task in our constraint table */ -static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable, +static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable, enum bp_type_idx type, int weight) { unsigned int *tsk_pinned; @@ -196,10 +188,11 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable, int old_idx = 0; int idx = 0; - old_count = task_bp_pinned(tsk, type); + old_count = task_bp_pinned(bp, type); old_idx = old_count - 1; idx = old_idx + weight; + /* tsk_pinned[n] is the number of tasks having n breakpoints */ tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); if (enable) { tsk_pinned[idx]++; @@ -222,23 +215,30 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int cpu = bp->cpu; struct task_struct *tsk = bp->ctx->task; + /* Pinned counter cpu profiling */ + if (!tsk) { + + if (enable) + per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight; + else + per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight; + return; + } + /* Pinned counter task profiling */ - if (tsk) { - if (cpu >= 0) { - toggle_bp_task_slot(tsk, cpu, enable, type, weight); - return; - } + if (!enable) + list_del(&bp->hw.bp_list); + + if (cpu >= 0) { + toggle_bp_task_slot(bp, cpu, enable, type, weight); + } else { for_each_online_cpu(cpu) - toggle_bp_task_slot(tsk, cpu, enable, type, weight); - return; + toggle_bp_task_slot(bp, cpu, enable, type, weight); } - /* Pinned counter cpu profiling */ if (enable) - per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight; - else - per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight; + list_add_tail(&bp->hw.bp_list, &bp_task_head); } /* @@ -301,6 +301,10 @@ static int __reserve_bp_slot(struct perf_event *bp) weight = hw_breakpoint_weight(bp); fetch_bp_busy_slots(&slots, bp, type); + /* + * Simulate the addition of this breakpoint to the constraints + * and see the result. 
+ */ fetch_this_slot(&slots, weight); /* Flexible counters need to keep at least one slot */ -- cgit v1.1 From c9642c49aae1272d7c24008a40ae614470b957a6 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 24 May 2010 16:22:30 +0800 Subject: tracing: Use a global field list for all syscall exit events All syscall exit events have the same fields. The kernel size drops 2.5K: text data bss dec hex filename 7018612 2034376 7251132 16304120 f8c7f8 vmlinux.o.orig 7018612 2031888 7251132 16301632 f8be40 vmlinux.o Signed-off-by: Li Zefan LKML-Reference: <4BFA3746.8070100@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace_syscalls.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 34e3580..bac752f 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -23,6 +23,9 @@ static int syscall_exit_register(struct ftrace_event_call *event, static int syscall_enter_define_fields(struct ftrace_event_call *call); static int syscall_exit_define_fields(struct ftrace_event_call *call); +/* All syscall exit events have the same fields */ +static LIST_HEAD(syscall_exit_fields); + static struct list_head * syscall_get_enter_fields(struct ftrace_event_call *call) { @@ -34,9 +37,7 @@ syscall_get_enter_fields(struct ftrace_event_call *call) static struct list_head * syscall_get_exit_fields(struct ftrace_event_call *call) { - struct syscall_metadata *entry = call->data; - - return &entry->exit_fields; + return &syscall_exit_fields; } struct trace_event_functions enter_syscall_print_funcs = { -- cgit v1.1 From 8728fe501ed562c1b46dde3c195eadec77bca033 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 24 May 2010 16:22:49 +0800 Subject: tracing: Don't allocate common fields for every trace events Every event has the same common fields, so it's a big waste of memory to have a copy of those fields for every event. 
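The diff below reworks the per-event "format" file so that the shared common fields are printed once, then a blank line, then the event-specific fields. Roughly, such a file then reads as follows; the event name, ID, field names, offsets and sizes are illustrative only and vary by kernel version and event.

name: example_event
ID: 42
format:
	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
	field:int common_pid;	offset:4;	size:4;	signed:1;
	field:int common_lock_depth;	offset:8;	size:4;	signed:1;

	field:... event-specific fields follow here ...;

print fmt: ...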
Signed-off-by: Li Zefan LKML-Reference: <4BFA3759.30105@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace.h | 2 + kernel/trace/trace_events.c | 113 +++++++++++++++++++++---------------- kernel/trace/trace_events_filter.c | 18 +++++- 3 files changed, 81 insertions(+), 52 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 01ce088..cc90ccd 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -698,6 +698,8 @@ struct filter_pred { int pop_n; }; +extern struct list_head ftrace_common_fields; + extern enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not); extern void print_event_filter(struct ftrace_event_call *call, diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index a594f9a..d3b4bdf 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -28,6 +28,7 @@ DEFINE_MUTEX(event_mutex); LIST_HEAD(ftrace_events); +LIST_HEAD(ftrace_common_fields); struct list_head * trace_get_fields(struct ftrace_event_call *event_call) @@ -37,15 +38,11 @@ trace_get_fields(struct ftrace_event_call *event_call) return event_call->class->get_fields(event_call); } -int trace_define_field(struct ftrace_event_call *call, const char *type, - const char *name, int offset, int size, int is_signed, - int filter_type) +static int __trace_define_field(struct list_head *head, const char *type, + const char *name, int offset, int size, + int is_signed, int filter_type) { struct ftrace_event_field *field; - struct list_head *head; - - if (WARN_ON(!call->class)) - return 0; field = kzalloc(sizeof(*field), GFP_KERNEL); if (!field) @@ -68,7 +65,6 @@ int trace_define_field(struct ftrace_event_call *call, const char *type, field->size = size; field->is_signed = is_signed; - head = trace_get_fields(call); list_add(&field->link, head); return 0; @@ -80,17 +76,32 @@ err: return -ENOMEM; } + +int trace_define_field(struct ftrace_event_call *call, const char *type, + const char *name, int offset, int size, int is_signed, + int filter_type) +{ + struct list_head *head; + + if (WARN_ON(!call->class)) + return 0; + + head = trace_get_fields(call); + return __trace_define_field(head, type, name, offset, size, + is_signed, filter_type); +} EXPORT_SYMBOL_GPL(trace_define_field); #define __common_field(type, item) \ - ret = trace_define_field(call, #type, "common_" #item, \ - offsetof(typeof(ent), item), \ - sizeof(ent.item), \ - is_signed_type(type), FILTER_OTHER); \ + ret = __trace_define_field(&ftrace_common_fields, #type, \ + "common_" #item, \ + offsetof(typeof(ent), item), \ + sizeof(ent.item), \ + is_signed_type(type), FILTER_OTHER); \ if (ret) \ return ret; -static int trace_define_common_fields(struct ftrace_event_call *call) +static int trace_define_common_fields(void) { int ret; struct trace_entry ent; @@ -544,32 +555,10 @@ out: return ret; } -static ssize_t -event_format_read(struct file *filp, char __user *ubuf, size_t cnt, - loff_t *ppos) +static void print_event_fields(struct trace_seq *s, struct list_head *head) { - struct ftrace_event_call *call = filp->private_data; struct ftrace_event_field *field; - struct list_head *head; - struct trace_seq *s; - int common_field_count = 5; - char *buf; - int r = 0; - - if (*ppos) - return 0; - - s = kmalloc(sizeof(*s), GFP_KERNEL); - if (!s) - return -ENOMEM; - - trace_seq_init(s); - trace_seq_printf(s, "name: %s\n", call->name); - trace_seq_printf(s, "ID: %d\n", call->event.type); - trace_seq_printf(s, "format:\n"); - - head = 
trace_get_fields(call); list_for_each_entry_reverse(field, head, link) { /* * Smartly shows the array type(except dynamic array). @@ -584,29 +573,54 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt, array_descriptor = NULL; if (!array_descriptor) { - r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;" + trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;" "\tsize:%u;\tsigned:%d;\n", field->type, field->name, field->offset, field->size, !!field->is_signed); } else { - r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;" + trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;" "\tsize:%u;\tsigned:%d;\n", (int)(array_descriptor - field->type), field->type, field->name, array_descriptor, field->offset, field->size, !!field->is_signed); } + } +} - if (--common_field_count == 0) - r = trace_seq_printf(s, "\n"); +static ssize_t +event_format_read(struct file *filp, char __user *ubuf, size_t cnt, + loff_t *ppos) +{ + struct ftrace_event_call *call = filp->private_data; + struct list_head *head; + struct trace_seq *s; + char *buf; + int r; - if (!r) - break; - } + if (*ppos) + return 0; - if (r) - r = trace_seq_printf(s, "\nprint fmt: %s\n", - call->print_fmt); + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + trace_seq_init(s); + + trace_seq_printf(s, "name: %s\n", call->name); + trace_seq_printf(s, "ID: %d\n", call->event.type); + trace_seq_printf(s, "format:\n"); + + /* print common fields */ + print_event_fields(s, &ftrace_common_fields); + + trace_seq_putc(s, '\n'); + + /* print event specific fields */ + head = trace_get_fields(call); + print_event_fields(s, head); + + r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt); if (!r) { /* @@ -980,9 +994,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, */ head = trace_get_fields(call); if (list_empty(head)) { - ret = trace_define_common_fields(call); - if (!ret) - ret = call->class->define_fields(call); + ret = call->class->define_fields(call); if (ret < 0) { pr_warning("Could not initialize trace point" " events/%s\n", call->name); @@ -1319,6 +1331,9 @@ static __init int event_trace_init(void) trace_create_file("enable", 0644, d_events, NULL, &ftrace_system_enable_fops); + if (trace_define_common_fields()) + pr_warning("tracing: Failed to allocate common fields"); + for_each_event(call, __start_ftrace_events, __stop_ftrace_events) { /* The linker may leave blanks */ if (!call->name) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 57bb1bb..330fefd 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -497,12 +497,10 @@ void print_subsystem_event_filter(struct event_subsystem *system, } static struct ftrace_event_field * -find_event_field(struct ftrace_event_call *call, char *name) +__find_event_field(struct list_head *head, char *name) { struct ftrace_event_field *field; - struct list_head *head; - head = trace_get_fields(call); list_for_each_entry(field, head, link) { if (!strcmp(field->name, name)) return field; @@ -511,6 +509,20 @@ find_event_field(struct ftrace_event_call *call, char *name) return NULL; } +static struct ftrace_event_field * +find_event_field(struct ftrace_event_call *call, char *name) +{ + struct ftrace_event_field *field; + struct list_head *head; + + field = __find_event_field(&ftrace_common_fields, name); + if (field) + return field; + + head = trace_get_fields(call); + return __find_event_field(head, name); +} + static void filter_free_pred(struct filter_pred 
*pred) { if (!pred) -- cgit v1.1 From c9d932cf8a1c608b676021aef0189376ba6ef151 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 24 May 2010 16:24:28 +0800 Subject: tracing: Remove test of NULL define_fields callback Every event (or event class) has it's define_fields callback, so the test is redundant. Signed-off-by: Li Zefan LKML-Reference: <4BFA37BC.8080707@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace_events.c | 28 +++++++++++++--------------- kernel/trace/trace_events_filter.c | 9 --------- 2 files changed, 13 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index d3b4bdf..5bad9cb 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -987,23 +987,21 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, id); #endif - if (call->class->define_fields) { - /* - * Other events may have the same class. Only update - * the fields if they are not already defined. - */ - head = trace_get_fields(call); - if (list_empty(head)) { - ret = call->class->define_fields(call); - if (ret < 0) { - pr_warning("Could not initialize trace point" - " events/%s\n", call->name); - return ret; - } + /* + * Other events may have the same class. Only update + * the fields if they are not already defined. + */ + head = trace_get_fields(call); + if (list_empty(head)) { + ret = call->class->define_fields(call); + if (ret < 0) { + pr_warning("Could not initialize trace point" + " events/%s\n", call->name); + return ret; } - trace_create_file("filter", 0644, call->dir, call, - filter); } + trace_create_file("filter", 0644, call->dir, call, + filter); trace_create_file("format", 0444, call->dir, call, format); diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 330fefd..36d4010 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -639,9 +639,6 @@ static int init_subsystem_preds(struct event_subsystem *system) int err; list_for_each_entry(call, &ftrace_events, list) { - if (!call->class || !call->class->define_fields) - continue; - if (strcmp(call->class->system, system->name) != 0) continue; @@ -658,9 +655,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system) struct ftrace_event_call *call; list_for_each_entry(call, &ftrace_events, list) { - if (!call->class || !call->class->define_fields) - continue; - if (strcmp(call->class->system, system->name) != 0) continue; @@ -1263,9 +1257,6 @@ static int replace_system_preds(struct event_subsystem *system, list_for_each_entry(call, &ftrace_events, list) { struct event_filter *filter = call->filter; - if (!call->class || !call->class->define_fields) - continue; - if (strcmp(call->class->system, system->name) != 0) continue; -- cgit v1.1 From ffb9f99528574ab9a55d4c8fd22e9d3ca49efa86 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 24 May 2010 16:24:52 +0800 Subject: tracing: Remove redundant raw_init callbacks raw_init callback is optional. 
Signed-off-by: Li Zefan LKML-Reference: <4BFA37D4.7070500@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace_export.c | 8 +------- kernel/trace/trace_kprobe.c | 10 +--------- 2 files changed, 2 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 8536e2a..4ba44de 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -125,12 +125,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ #include "trace_entries.h" -static int ftrace_raw_init_event(struct ftrace_event_call *call) -{ - INIT_LIST_HEAD(&call->class->fields); - return 0; -} - #undef __entry #define __entry REC @@ -158,7 +152,7 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call) struct ftrace_event_class event_class_ftrace_##call = { \ .system = __stringify(TRACE_SYSTEM), \ .define_fields = ftrace_define_fields_##call, \ - .raw_init = ftrace_raw_init_event, \ + .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\ }; \ \ struct ftrace_event_call __used \ diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f52b5f5..3b831d8 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1214,11 +1214,6 @@ static void probe_event_disable(struct ftrace_event_call *call) } } -static int probe_event_raw_init(struct ftrace_event_call *event_call) -{ - return 0; -} - #undef DEFINE_FIELD #define DEFINE_FIELD(type, item, name, is_signed) \ do { \ @@ -1486,15 +1481,12 @@ static int register_probe_event(struct trace_probe *tp) int ret; /* Initialize ftrace_event_call */ + INIT_LIST_HEAD(&call->class->fields); if (probe_is_return(tp)) { - INIT_LIST_HEAD(&call->class->fields); call->event.funcs = &kretprobe_funcs; - call->class->raw_init = probe_event_raw_init; call->class->define_fields = kretprobe_event_define_fields; } else { - INIT_LIST_HEAD(&call->class->fields); call->event.funcs = &kprobe_funcs; - call->class->raw_init = probe_event_raw_init; call->class->define_fields = kprobe_event_define_fields; } if (set_print_fmt(tp) < 0) -- cgit v1.1 From 67ead0a6ceb001b4cb891d782e440f0e79493ba2 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 24 May 2010 16:25:13 +0800 Subject: tracing: Remove open-coded __trace_add_event_call() Let trace_module_add_events() and event_trace_init() call __trace_add_event_call(). 
Signed-off-by: Li Zefan LKML-Reference: <4BFA37E9.1020106@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace_events.c | 70 ++++++++++++--------------------------------- 1 file changed, 19 insertions(+), 51 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 5bad9cb..69bee4c 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1009,11 +1009,17 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, return 0; } -static int __trace_add_event_call(struct ftrace_event_call *call) +static int +__trace_add_event_call(struct ftrace_event_call *call, struct module *mod, + const struct file_operations *id, + const struct file_operations *enable, + const struct file_operations *filter, + const struct file_operations *format) { struct dentry *d_events; int ret; + /* The linker may leave blanks */ if (!call->name) return -EINVAL; @@ -1021,8 +1027,8 @@ static int __trace_add_event_call(struct ftrace_event_call *call) ret = call->class->raw_init(call); if (ret < 0) { if (ret != -ENOSYS) - pr_warning("Could not initialize trace " - "events/%s\n", call->name); + pr_warning("Could not initialize trace events/%s\n", + call->name); return ret; } } @@ -1031,11 +1037,10 @@ static int __trace_add_event_call(struct ftrace_event_call *call) if (!d_events) return -ENOENT; - ret = event_create_dir(call, d_events, &ftrace_event_id_fops, - &ftrace_enable_fops, &ftrace_event_filter_fops, - &ftrace_event_format_fops); + ret = event_create_dir(call, d_events, id, enable, filter, format); if (!ret) list_add(&call->list, &ftrace_events); + call->mod = mod; return ret; } @@ -1045,7 +1050,10 @@ int trace_add_event_call(struct ftrace_event_call *call) { int ret; mutex_lock(&event_mutex); - ret = __trace_add_event_call(call); + ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops, + &ftrace_enable_fops, + &ftrace_event_filter_fops, + &ftrace_event_format_fops); mutex_unlock(&event_mutex); return ret; } @@ -1162,8 +1170,6 @@ static void trace_module_add_events(struct module *mod) { struct ftrace_module_file_ops *file_ops = NULL; struct ftrace_event_call *call, *start, *end; - struct dentry *d_events; - int ret; start = mod->trace_events; end = mod->trace_events + mod->num_trace_events; @@ -1171,38 +1177,14 @@ static void trace_module_add_events(struct module *mod) if (start == end) return; - d_events = event_trace_events_dir(); - if (!d_events) + file_ops = trace_create_file_ops(mod); + if (!file_ops) return; for_each_event(call, start, end) { - /* The linker may leave blanks */ - if (!call->name) - continue; - if (call->class->raw_init) { - ret = call->class->raw_init(call); - if (ret < 0) { - if (ret != -ENOSYS) - pr_warning("Could not initialize trace " - "point events/%s\n", call->name); - continue; - } - } - /* - * This module has events, create file ops for this module - * if not already done. 
- */ - if (!file_ops) { - file_ops = trace_create_file_ops(mod); - if (!file_ops) - return; - } - call->mod = mod; - ret = event_create_dir(call, d_events, + __trace_add_event_call(call, mod, &file_ops->id, &file_ops->enable, &file_ops->filter, &file_ops->format); - if (!ret) - list_add(&call->list, &ftrace_events); } } @@ -1333,24 +1315,10 @@ static __init int event_trace_init(void) pr_warning("tracing: Failed to allocate common fields"); for_each_event(call, __start_ftrace_events, __stop_ftrace_events) { - /* The linker may leave blanks */ - if (!call->name) - continue; - if (call->class->raw_init) { - ret = call->class->raw_init(call); - if (ret < 0) { - if (ret != -ENOSYS) - pr_warning("Could not initialize trace " - "point events/%s\n", call->name); - continue; - } - } - ret = event_create_dir(call, d_events, &ftrace_event_id_fops, + __trace_add_event_call(call, NULL, &ftrace_event_id_fops, &ftrace_enable_fops, &ftrace_event_filter_fops, &ftrace_event_format_fops); - if (!ret) - list_add(&call->list, &ftrace_events); } while (true) { -- cgit v1.1 From d62f85d1e22e537192ce494c89540e1ac0d8bfc7 Mon Sep 17 00:00:00 2001 From: Chase Douglas Date: Tue, 15 Jun 2010 12:29:15 -0400 Subject: tracing/function-graph: Use correct string size for snprintf The nsecs_str string is a local variable defined as: char nsecs_str[5]; It is possible for the snprintf call to use a size value larger than the size of the string. This should not cause a buffer overrun as it is written now due to the value for the string format "%03lu" can not be larger than 1000. However, this change makes it correct. By making the size correct we guard against potential future changes that could actually cause a buffer overrun. Signed-off-by: Chase Douglas LKML-Reference: <1276619355-18116-1-git-send-email-chase.douglas@canonical.com> [ added 'UL' to number 8 to fix gcc warning comparing it to sizeof() ] Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions_graph.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 79f4bac..6bff236 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -641,7 +641,8 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) /* Print nsecs (we don't want to exceed 7 numbers) */ if (len < 7) { - snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); + snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu", + nsecs_rem); ret = trace_seq_printf(s, ".%s", nsecs_str); if (!ret) return TRACE_TYPE_PARTIAL_LINE; -- cgit v1.1 From a1d0ce8213e9ddf4046ef5ba95c55762d075f541 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 8 Jun 2010 11:22:06 -0400 Subject: tracing: Use class->reg() for all registering of events Because kprobes and syscalls need special processing to register events, the class->reg() method was created to handle the differences. But instead of creating a default ->reg for perf and ftrace events, the code was scattered with: if (class->reg) class->reg(); else default_reg(); This is messy and can also lead to bugs. This patch cleans up this code and creates a default reg() entry for the events allowing for the code to directly call the class->reg() without the condition. 
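Concretely, the pattern being removed and the default callback that replaces it look like this (both taken from the diff that follows; the switch body is abbreviated here to the non-perf cases only):

	/* before: every call site open-coded the fallback */
	if (call->class->reg)
		ret = call->class->reg(call, TRACE_REG_REGISTER);
	else
		ret = tracepoint_probe_register(call->name,
						call->class->probe, call);

	/* after: plain tracepoint events get a default ->reg() ... */
	int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
	{
		switch (type) {
		case TRACE_REG_REGISTER:
			return tracepoint_probe_register(call->name,
							 call->class->probe,
							 call);
		case TRACE_REG_UNREGISTER:
			tracepoint_probe_unregister(call->name,
						    call->class->probe, call);
			return 0;
		}
		return 0;
	}

	/* ...so callers simply do: */
	ret = call->class->reg(call, TRACE_REG_REGISTER);
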
Reported-by: Peter Zijlstra Acked-by: Peter Zijlstra Signed-off-by: Steven Rostedt --- kernel/trace/trace_event_perf.c | 19 +++----------- kernel/trace/trace_events.c | 55 +++++++++++++++++++++++++++-------------- 2 files changed, 39 insertions(+), 35 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 6053982..2375165 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -54,13 +54,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event, } } - if (tp_event->class->reg) - ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER); - else - ret = tracepoint_probe_register(tp_event->name, - tp_event->class->perf_probe, - tp_event); - + ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER); if (ret) goto fail; @@ -94,9 +88,7 @@ int perf_trace_init(struct perf_event *p_event) mutex_lock(&event_mutex); list_for_each_entry(tp_event, &ftrace_events, list) { if (tp_event->event.type == event_id && - tp_event->class && - (tp_event->class->perf_probe || - tp_event->class->reg) && + tp_event->class && tp_event->class->reg && try_module_get(tp_event->mod)) { ret = perf_trace_event_init(tp_event, p_event); break; @@ -136,12 +128,7 @@ void perf_trace_destroy(struct perf_event *p_event) if (--tp_event->perf_refcount > 0) goto out; - if (tp_event->class->reg) - tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER); - else - tracepoint_probe_unregister(tp_event->name, - tp_event->class->perf_probe, - tp_event); + tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER); /* * Ensure our callback won't be called anymore. See diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 69bee4c..e8e6043 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -141,6 +141,35 @@ int trace_event_raw_init(struct ftrace_event_call *call) } EXPORT_SYMBOL_GPL(trace_event_raw_init); +int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type) +{ + switch (type) { + case TRACE_REG_REGISTER: + return tracepoint_probe_register(call->name, + call->class->probe, + call); + case TRACE_REG_UNREGISTER: + tracepoint_probe_unregister(call->name, + call->class->probe, + call); + return 0; + +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_REGISTER: + return tracepoint_probe_register(call->name, + call->class->perf_probe, + call); + case TRACE_REG_PERF_UNREGISTER: + tracepoint_probe_unregister(call->name, + call->class->perf_probe, + call); + return 0; +#endif + } + return 0; +} +EXPORT_SYMBOL_GPL(ftrace_event_reg); + static int ftrace_event_enable_disable(struct ftrace_event_call *call, int enable) { @@ -151,23 +180,13 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, if (call->flags & TRACE_EVENT_FL_ENABLED) { call->flags &= ~TRACE_EVENT_FL_ENABLED; tracing_stop_cmdline_record(); - if (call->class->reg) - call->class->reg(call, TRACE_REG_UNREGISTER); - else - tracepoint_probe_unregister(call->name, - call->class->probe, - call); + call->class->reg(call, TRACE_REG_UNREGISTER); } break; case 1: if (!(call->flags & TRACE_EVENT_FL_ENABLED)) { tracing_start_cmdline_record(); - if (call->class->reg) - ret = call->class->reg(call, TRACE_REG_REGISTER); - else - ret = tracepoint_probe_register(call->name, - call->class->probe, - call); + ret = call->class->reg(call, TRACE_REG_REGISTER); if (ret) { tracing_stop_cmdline_record(); pr_info("event trace: Could not enable event " @@ -205,8 +224,7 @@ static int __ftrace_set_clr_event(const 
char *match, const char *sub, mutex_lock(&event_mutex); list_for_each_entry(call, &ftrace_events, list) { - if (!call->name || !call->class || - (!call->class->probe && !call->class->reg)) + if (!call->name || !call->class || !call->class->reg) continue; if (match && @@ -332,7 +350,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) * The ftrace subsystem is for showing formats only. * They can not be enabled or disabled via the event files. */ - if (call->class && (call->class->probe || call->class->reg)) + if (call->class && call->class->reg) return call; } @@ -485,8 +503,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, mutex_lock(&event_mutex); list_for_each_entry(call, &ftrace_events, list) { - if (!call->name || !call->class || - (!call->class->probe && !call->class->reg)) + if (!call->name || !call->class || !call->class->reg) continue; if (system && strcmp(call->class->system, system) != 0) @@ -977,12 +994,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, return -1; } - if (call->class->probe || call->class->reg) + if (call->class->reg) trace_create_file("enable", 0644, call->dir, call, enable); #ifdef CONFIG_PERF_EVENTS - if (call->event.type && (call->class->perf_probe || call->class->reg)) + if (call->event.type && call->class->reg) trace_create_file("id", 0444, call->dir, call, id); #endif -- cgit v1.1 From b56c0d8937e665a27d90517ee7a746d0aa05af46 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: kthread: implement kthread_worker Implement simple work processor for kthread. This is to ease using kthread. Single thread workqueue used to be used for things like this but workqueue won't guarantee fixed kthread association anymore to enable worker sharing. This can be used in cases where specific kthread association is necessary, for example, when it should have RT priority or be assigned to certain cgroup. Signed-off-by: Tejun Heo Cc: Andrew Morton --- kernel/kthread.c | 149 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 149 insertions(+) (limited to 'kernel') diff --git a/kernel/kthread.c b/kernel/kthread.c index 83911c78..8b63c7f 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include static DEFINE_SPINLOCK(kthread_create_lock); @@ -247,3 +249,150 @@ int kthreadd(void *unused) return 0; } + +/** + * kthread_worker_fn - kthread function to process kthread_worker + * @worker_ptr: pointer to initialized kthread_worker + * + * This function can be used as @threadfn to kthread_create() or + * kthread_run() with @worker_ptr argument pointing to an initialized + * kthread_worker. The started kthread will process work_list until + * the it is stopped with kthread_stop(). A kthread can also call + * this function directly after extra initialization. + * + * Different kthreads can be used for the same kthread_worker as long + * as there's only one kthread attached to it at any given time. A + * kthread_worker without an attached kthread simply collects queued + * kthread_works. 
+ */ +int kthread_worker_fn(void *worker_ptr) +{ + struct kthread_worker *worker = worker_ptr; + struct kthread_work *work; + + WARN_ON(worker->task); + worker->task = current; +repeat: + set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */ + + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); + spin_lock_irq(&worker->lock); + worker->task = NULL; + spin_unlock_irq(&worker->lock); + return 0; + } + + work = NULL; + spin_lock_irq(&worker->lock); + if (!list_empty(&worker->work_list)) { + work = list_first_entry(&worker->work_list, + struct kthread_work, node); + list_del_init(&work->node); + } + spin_unlock_irq(&worker->lock); + + if (work) { + __set_current_state(TASK_RUNNING); + work->func(work); + smp_wmb(); /* wmb worker-b0 paired with flush-b1 */ + work->done_seq = work->queue_seq; + smp_mb(); /* mb worker-b1 paired with flush-b0 */ + if (atomic_read(&work->flushing)) + wake_up_all(&work->done); + } else if (!freezing(current)) + schedule(); + + try_to_freeze(); + goto repeat; +} +EXPORT_SYMBOL_GPL(kthread_worker_fn); + +/** + * queue_kthread_work - queue a kthread_work + * @worker: target kthread_worker + * @work: kthread_work to queue + * + * Queue @work to work processor @task for async execution. @task + * must have been created with kthread_worker_create(). Returns %true + * if @work was successfully queued, %false if it was already pending. + */ +bool queue_kthread_work(struct kthread_worker *worker, + struct kthread_work *work) +{ + bool ret = false; + unsigned long flags; + + spin_lock_irqsave(&worker->lock, flags); + if (list_empty(&work->node)) { + list_add_tail(&work->node, &worker->work_list); + work->queue_seq++; + if (likely(worker->task)) + wake_up_process(worker->task); + ret = true; + } + spin_unlock_irqrestore(&worker->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(queue_kthread_work); + +/** + * flush_kthread_work - flush a kthread_work + * @work: work to flush + * + * If @work is queued or executing, wait for it to finish execution. + */ +void flush_kthread_work(struct kthread_work *work) +{ + int seq = work->queue_seq; + + atomic_inc(&work->flushing); + + /* + * mb flush-b0 paired with worker-b1, to make sure either + * worker sees the above increment or we see done_seq update. + */ + smp_mb__after_atomic_inc(); + + /* A - B <= 0 tests whether B is in front of A regardless of overflow */ + wait_event(work->done, seq - work->done_seq <= 0); + atomic_dec(&work->flushing); + + /* + * rmb flush-b1 paired with worker-b0, to make sure our caller + * sees every change made by work->func(). + */ + smp_mb__after_atomic_dec(); +} +EXPORT_SYMBOL_GPL(flush_kthread_work); + +struct kthread_flush_work { + struct kthread_work work; + struct completion done; +}; + +static void kthread_flush_work_fn(struct kthread_work *work) +{ + struct kthread_flush_work *fwork = + container_of(work, struct kthread_flush_work, work); + complete(&fwork->done); +} + +/** + * flush_kthread_worker - flush all current works on a kthread_worker + * @worker: worker to flush + * + * Wait until all currently executing or pending works on @worker are + * finished. 
+ */ +void flush_kthread_worker(struct kthread_worker *worker) +{ + struct kthread_flush_work fwork = { + KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), + COMPLETION_INITIALIZER_ONSTACK(fwork.done), + }; + + queue_kthread_work(worker, &fwork.work); + wait_for_completion(&fwork.done); +} +EXPORT_SYMBOL_GPL(flush_kthread_worker); -- cgit v1.1 From 82805ab77d25643f579d90397dcd34f05d1b750a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: kthread: implement kthread_data() Implement kthread_data() which takes @task pointing to a kthread and returns @data specified when creating the kthread. The caller is responsible for ensuring the validity of @task when calling this function. Signed-off-by: Tejun Heo --- kernel/kthread.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'kernel') diff --git a/kernel/kthread.c b/kernel/kthread.c index 8b63c7f..2dc3786 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -37,6 +37,7 @@ struct kthread_create_info struct kthread { int should_stop; + void *data; struct completion exited; }; @@ -56,6 +57,19 @@ int kthread_should_stop(void) } EXPORT_SYMBOL(kthread_should_stop); +/** + * kthread_data - return data value specified on kthread creation + * @task: kthread task in question + * + * Return the data value specified when kthread @task was created. + * The caller is responsible for ensuring the validity of @task when + * calling this function. + */ +void *kthread_data(struct task_struct *task) +{ + return to_kthread(task)->data; +} + static int kthread(void *_create) { /* Copy data: it's on kthread's stack */ @@ -66,6 +80,7 @@ static int kthread(void *_create) int ret; self.should_stop = 0; + self.data = data; init_completion(&self.exited); current->vfork_done = &self.exited; -- cgit v1.1 From c790bce0481857412c964c5e9d46d56e41c4b051 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: workqueue: kill RT workqueue With stop_machine() converted to use cpu_stop, RT workqueue doesn't have any user left. Kill RT workqueue support. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 327d2de..1a47fbf92 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -62,7 +62,6 @@ struct workqueue_struct { const char *name; int singlethread; int freezeable; /* Freeze threads during suspend */ - int rt; #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif @@ -947,7 +946,6 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu) static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) { - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; struct workqueue_struct *wq = cwq->wq; const char *fmt = is_wq_single_threaded(wq) ? 
"%s" : "%s/%d"; struct task_struct *p; @@ -963,8 +961,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) */ if (IS_ERR(p)) return PTR_ERR(p); - if (cwq->wq->rt) - sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); cwq->thread = p; trace_workqueue_creation(cwq->thread, cpu); @@ -986,7 +982,6 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) struct workqueue_struct *__create_workqueue_key(const char *name, int singlethread, int freezeable, - int rt, struct lock_class_key *key, const char *lock_name) { @@ -1008,7 +1003,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); wq->singlethread = singlethread; wq->freezeable = freezeable; - wq->rt = rt; INIT_LIST_HEAD(&wq->list); if (singlethread) { -- cgit v1.1 From 4690c4ab56c71919893ca25252f2dd65b58188c7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: misc/cosmetic updates Make the following updates in preparation of concurrency managed workqueue. None of these changes causes any visible behavior difference. * Add comments and adjust indentations to data structures and several functions. * Rename wq_per_cpu() to get_cwq() and swap the position of two parameters for consistency. Convert a direct per_cpu_ptr() access to wq->cpu_wq to get_cwq(). * Add work_static() and Update set_wq_data() such that it sets the flags part to WORK_STRUCT_PENDING | WORK_STRUCT_STATIC if static | @extra_flags. * Move santiy check on work->entry emptiness from queue_work_on() to __queue_work() which all queueing paths share. * Make __queue_work() take @cpu and @wq instead of @cwq. * Restructure flush_work() and __create_workqueue_key() to make them easier to modify. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 131 ++++++++++++++++++++++++++++++++++------------------- 1 file changed, 84 insertions(+), 47 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1a47fbf92..c56146a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -37,6 +37,16 @@ #include /* + * Structure fields follow one of the following exclusion rules. + * + * I: Set during initialization and read-only afterwards. + * + * L: cwq->lock protected. Access with cwq->lock held. + * + * W: workqueue_lock protected. + */ + +/* * The per-CPU workqueue (if single thread, we always use the first * possible cpu). */ @@ -48,8 +58,8 @@ struct cpu_workqueue_struct { wait_queue_head_t more_work; struct work_struct *current_work; - struct workqueue_struct *wq; - struct task_struct *thread; + struct workqueue_struct *wq; /* I: the owning workqueue */ + struct task_struct *thread; } ____cacheline_aligned; /* @@ -57,13 +67,13 @@ struct cpu_workqueue_struct { * per-CPU workqueues: */ struct workqueue_struct { - struct cpu_workqueue_struct *cpu_wq; - struct list_head list; - const char *name; + struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ + struct list_head list; /* W: list of all workqueues */ + const char *name; /* I: workqueue name */ int singlethread; int freezeable; /* Freeze threads during suspend */ #ifdef CONFIG_LOCKDEP - struct lockdep_map lockdep_map; + struct lockdep_map lockdep_map; #endif }; @@ -204,8 +214,8 @@ static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) ? 
cpu_singlethread_map : cpu_populated_map; } -static -struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu) +static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, + struct workqueue_struct *wq) { if (unlikely(is_wq_single_threaded(wq))) cpu = singlethread_cpu; @@ -217,15 +227,13 @@ struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu) * - Must *only* be called if the pending flag is set */ static inline void set_wq_data(struct work_struct *work, - struct cpu_workqueue_struct *cwq) + struct cpu_workqueue_struct *cwq, + unsigned long extra_flags) { - unsigned long new; - BUG_ON(!work_pending(work)); - new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING); - new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work); - atomic_long_set(&work->data, new); + atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) | + (1UL << WORK_STRUCT_PENDING) | extra_flags); } /* @@ -233,9 +241,7 @@ static inline void set_wq_data(struct work_struct *work, */ static inline void clear_wq_data(struct work_struct *work) { - unsigned long flags = *work_data_bits(work) & - (1UL << WORK_STRUCT_STATIC); - atomic_long_set(&work->data, flags); + atomic_long_set(&work->data, work_static(work)); } static inline @@ -244,29 +250,47 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); } +/** + * insert_work - insert a work into cwq + * @cwq: cwq @work belongs to + * @work: work to insert + * @head: insertion point + * @extra_flags: extra WORK_STRUCT_* flags to set + * + * Insert @work into @cwq after @head. + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ static void insert_work(struct cpu_workqueue_struct *cwq, - struct work_struct *work, struct list_head *head) + struct work_struct *work, struct list_head *head, + unsigned int extra_flags) { trace_workqueue_insertion(cwq->thread, work); - set_wq_data(work, cwq); + /* we own @work, set data and link */ + set_wq_data(work, cwq, extra_flags); + /* * Ensure that we get the right work->data if we see the * result of list_add() below, see try_to_grab_pending(). 
*/ smp_wmb(); + list_add_tail(&work->entry, head); wake_up(&cwq->more_work); } -static void __queue_work(struct cpu_workqueue_struct *cwq, +static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); unsigned long flags; debug_work_activate(work); spin_lock_irqsave(&cwq->lock, flags); - insert_work(cwq, work, &cwq->worklist); + BUG_ON(!list_empty(&work->entry)); + insert_work(cwq, work, &cwq->worklist, 0); spin_unlock_irqrestore(&cwq->lock, flags); } @@ -308,8 +332,7 @@ queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) int ret = 0; if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { - BUG_ON(!list_empty(&work->entry)); - __queue_work(wq_per_cpu(wq, cpu), work); + __queue_work(cpu, wq, work); ret = 1; } return ret; @@ -320,9 +343,8 @@ static void delayed_work_timer_fn(unsigned long __data) { struct delayed_work *dwork = (struct delayed_work *)__data; struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work); - struct workqueue_struct *wq = cwq->wq; - __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work); + __queue_work(smp_processor_id(), cwq->wq, &dwork->work); } /** @@ -366,7 +388,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); /* This stores cwq for the moment, for the timer_fn */ - set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id())); + set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -430,6 +452,12 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) spin_unlock_irq(&cwq->lock); } +/** + * worker_thread - the worker thread function + * @__cwq: cwq to serve + * + * The cwq worker thread function. + */ static int worker_thread(void *__cwq) { struct cpu_workqueue_struct *cwq = __cwq; @@ -468,6 +496,17 @@ static void wq_barrier_func(struct work_struct *work) complete(&barr->done); } +/** + * insert_wq_barrier - insert a barrier work + * @cwq: cwq to insert barrier into + * @barr: wq_barrier to insert + * @head: insertion point + * + * Insert barrier @barr into @cwq before @head. + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, struct wq_barrier *barr, struct list_head *head) { @@ -479,11 +518,10 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, */ INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); - init_completion(&barr->done); debug_work_activate(&barr->work); - insert_work(cwq, &barr->work, head); + insert_work(cwq, &barr->work, head, 0); } static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) @@ -517,9 +555,6 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) * * We sleep until all works which were queued on entry have been handled, * but we are not livelocked by new incoming ones. - * - * This function used to run the workqueues itself. Now we just wait for the - * helper threads to do it. 
*/ void flush_workqueue(struct workqueue_struct *wq) { @@ -558,7 +593,6 @@ int flush_work(struct work_struct *work) lock_map_acquire(&cwq->wq->lockdep_map); lock_map_release(&cwq->wq->lockdep_map); - prev = NULL; spin_lock_irq(&cwq->lock); if (!list_empty(&work->entry)) { /* @@ -567,22 +601,22 @@ int flush_work(struct work_struct *work) */ smp_rmb(); if (unlikely(cwq != get_wq_data(work))) - goto out; + goto already_gone; prev = &work->entry; } else { if (cwq->current_work != work) - goto out; + goto already_gone; prev = &cwq->worklist; } insert_wq_barrier(cwq, &barr, prev->next); -out: - spin_unlock_irq(&cwq->lock); - if (!prev) - return 0; + spin_unlock_irq(&cwq->lock); wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); return 1; +already_gone: + spin_unlock_irq(&cwq->lock); + return 0; } EXPORT_SYMBOL_GPL(flush_work); @@ -665,7 +699,7 @@ static void wait_on_work(struct work_struct *work) cpu_map = wq_cpu_map(wq); for_each_cpu(cpu, cpu_map) - wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); + wait_on_cpu_work(get_cwq(cpu, wq), work); } static int __cancel_work_timer(struct work_struct *work, @@ -782,9 +816,8 @@ EXPORT_SYMBOL(schedule_delayed_work); void flush_delayed_work(struct delayed_work *dwork) { if (del_timer_sync(&dwork->timer)) { - struct cpu_workqueue_struct *cwq; - cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu()); - __queue_work(cwq, &dwork->work); + __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq, + &dwork->work); put_cpu(); } flush_work(&dwork->work); @@ -991,13 +1024,11 @@ struct workqueue_struct *__create_workqueue_key(const char *name, wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) - return NULL; + goto err; wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); - if (!wq->cpu_wq) { - kfree(wq); - return NULL; - } + if (!wq->cpu_wq) + goto err; wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); @@ -1041,6 +1072,12 @@ struct workqueue_struct *__create_workqueue_key(const char *name, wq = NULL; } return wq; +err: + if (wq) { + free_percpu(wq->cpu_wq); + kfree(wq); + } + return NULL; } EXPORT_SYMBOL_GPL(__create_workqueue_key); -- cgit v1.1 From 97e37d7b9e65a6ac939f796f91081135b7a08acc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: merge feature parameters into flags Currently, __create_workqueue_key() takes @singlethread and @freezeable paramters and store them separately in workqueue_struct. Merge them into a single flags parameter and field and use WQ_FREEZEABLE and WQ_SINGLE_THREAD. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c56146a..68e4dd8 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -67,11 +67,10 @@ struct cpu_workqueue_struct { * per-CPU workqueues: */ struct workqueue_struct { + unsigned int flags; /* I: WQ_* flags */ struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ struct list_head list; /* W: list of all workqueues */ const char *name; /* I: workqueue name */ - int singlethread; - int freezeable; /* Freeze threads during suspend */ #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif @@ -203,9 +202,9 @@ static const struct cpumask *cpu_singlethread_map __read_mostly; static cpumask_var_t cpu_populated_map __read_mostly; /* If it's single threaded, it isn't in the list of workqueues. 
*/ -static inline int is_wq_single_threaded(struct workqueue_struct *wq) +static inline bool is_wq_single_threaded(struct workqueue_struct *wq) { - return wq->singlethread; + return wq->flags & WQ_SINGLE_THREAD; } static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) @@ -463,7 +462,7 @@ static int worker_thread(void *__cwq) struct cpu_workqueue_struct *cwq = __cwq; DEFINE_WAIT(wait); - if (cwq->wq->freezeable) + if (cwq->wq->flags & WQ_FREEZEABLE) set_freezable(); for (;;) { @@ -1013,8 +1012,7 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) } struct workqueue_struct *__create_workqueue_key(const char *name, - int singlethread, - int freezeable, + unsigned int flags, struct lock_class_key *key, const char *lock_name) { @@ -1030,13 +1028,12 @@ struct workqueue_struct *__create_workqueue_key(const char *name, if (!wq->cpu_wq) goto err; + wq->flags = flags; wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); - wq->singlethread = singlethread; - wq->freezeable = freezeable; INIT_LIST_HEAD(&wq->list); - if (singlethread) { + if (flags & WQ_SINGLE_THREAD) { cwq = init_cpu_workqueue(wq, singlethread_cpu); err = create_workqueue_thread(cwq, singlethread_cpu); start_workqueue_thread(cwq, -1); -- cgit v1.1 From 22df02bb3fab24af97bff4c69cc6fd8529fc66fe Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: define masks for work flags and conditionalize STATIC flags Work flags are about to see more traditional mask handling. Define WORK_STRUCT_*_BIT as the bit position constant and redefine WORK_STRUCT_* as bit masks. Also, make WORK_STRUCT_STATIC_* flags conditional While at it, re-define these constants as enums and use WORK_STRUCT_STATIC instead of hard-coding 2 in WORK_DATA_STATIC_INIT(). Signed-off-by: Tejun Heo --- kernel/workqueue.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 68e4dd8..5c49d76 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -115,7 +115,7 @@ static int work_fixup_activate(void *addr, enum debug_obj_state state) * statically initialized. We just make sure that it * is tracked in the object tracker. */ - if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) { + if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { debug_object_init(work, &work_debug_descr); debug_object_activate(work, &work_debug_descr); return 0; @@ -232,7 +232,7 @@ static inline void set_wq_data(struct work_struct *work, BUG_ON(!work_pending(work)); atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) | - (1UL << WORK_STRUCT_PENDING) | extra_flags); + WORK_STRUCT_PENDING | extra_flags); } /* @@ -330,7 +330,7 @@ queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) { int ret = 0; - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_work(cpu, wq, work); ret = 1; } @@ -380,7 +380,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct timer_list *timer = &dwork->timer; struct work_struct *work = &dwork->work; - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); @@ -516,7 +516,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, * might deadlock. 
*/ INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); - __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); + __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); init_completion(&barr->done); debug_work_activate(&barr->work); @@ -628,7 +628,7 @@ static int try_to_grab_pending(struct work_struct *work) struct cpu_workqueue_struct *cwq; int ret = -1; - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) return 0; /* -- cgit v1.1 From a62428c0ae54a39e411251e836c3fe3dc11a5f5f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: separate out process_one_work() Separate out process_one_work() out of run_workqueue(). This patch doesn't cause any behavior change. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 100 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 39 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 5c49d76..8e3082b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -402,51 +402,73 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, } EXPORT_SYMBOL_GPL(queue_delayed_work_on); +/** + * process_one_work - process single work + * @cwq: cwq to process work for + * @work: work to process + * + * Process @work. This function contains all the logics necessary to + * process a single work including synchronization against and + * interaction with other workers on the same cpu, queueing and + * flushing. As long as context requirement is met, any worker can + * call this function to process a work. + * + * CONTEXT: + * spin_lock_irq(cwq->lock) which is released and regrabbed. + */ +static void process_one_work(struct cpu_workqueue_struct *cwq, + struct work_struct *work) +{ + work_func_t f = work->func; +#ifdef CONFIG_LOCKDEP + /* + * It is permissible to free the struct work_struct from + * inside the function that is called from it, this we need to + * take into account for lockdep too. To avoid bogus "held + * lock freed" warnings as well as problems when looking into + * work->lockdep_map, make a copy and use that here. 
+ */ + struct lockdep_map lockdep_map = work->lockdep_map; +#endif + /* claim and process */ + trace_workqueue_execution(cwq->thread, work); + debug_work_deactivate(work); + cwq->current_work = work; + list_del_init(&work->entry); + + spin_unlock_irq(&cwq->lock); + + BUG_ON(get_wq_data(work) != cwq); + work_clear_pending(work); + lock_map_acquire(&cwq->wq->lockdep_map); + lock_map_acquire(&lockdep_map); + f(work); + lock_map_release(&lockdep_map); + lock_map_release(&cwq->wq->lockdep_map); + + if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { + printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " + "%s/0x%08x/%d\n", + current->comm, preempt_count(), task_pid_nr(current)); + printk(KERN_ERR " last function: "); + print_symbol("%s\n", (unsigned long)f); + debug_show_held_locks(current); + dump_stack(); + } + + spin_lock_irq(&cwq->lock); + + /* we're done with it, release */ + cwq->current_work = NULL; +} + static void run_workqueue(struct cpu_workqueue_struct *cwq) { spin_lock_irq(&cwq->lock); while (!list_empty(&cwq->worklist)) { struct work_struct *work = list_entry(cwq->worklist.next, struct work_struct, entry); - work_func_t f = work->func; -#ifdef CONFIG_LOCKDEP - /* - * It is permissible to free the struct work_struct - * from inside the function that is called from it, - * this we need to take into account for lockdep too. - * To avoid bogus "held lock freed" warnings as well - * as problems when looking into work->lockdep_map, - * make a copy and use that here. - */ - struct lockdep_map lockdep_map = work->lockdep_map; -#endif - trace_workqueue_execution(cwq->thread, work); - debug_work_deactivate(work); - cwq->current_work = work; - list_del_init(cwq->worklist.next); - spin_unlock_irq(&cwq->lock); - - BUG_ON(get_wq_data(work) != cwq); - work_clear_pending(work); - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_acquire(&lockdep_map); - f(work); - lock_map_release(&lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); - - if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { - printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " - "%s/0x%08x/%d\n", - current->comm, preempt_count(), - task_pid_nr(current)); - printk(KERN_ERR " last function: "); - print_symbol("%s\n", (unsigned long)f); - debug_show_held_locks(current); - dump_stack(); - } - - spin_lock_irq(&cwq->lock); - cwq->current_work = NULL; + process_one_work(cwq, work); } spin_unlock_irq(&cwq->lock); } -- cgit v1.1 From 64166699752006f1a23a9cf7c96ae36654ccfc2c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: temporarily remove workqueue tracing Strip tracing code from workqueue and remove workqueue tracing. This is temporary measure till concurrency managed workqueue is complete. Signed-off-by: Tejun Heo Cc: Frederic Weisbecker --- kernel/trace/Kconfig | 11 ----------- kernel/workqueue.c | 14 +++----------- 2 files changed, 3 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 8b1797c..a0d95c1 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -391,17 +391,6 @@ config KMEMTRACE If unsure, say N. -config WORKQUEUE_TRACER - bool "Trace workqueues" - select GENERIC_TRACER - help - The workqueue tracer provides some statistical information - about each cpu workqueue thread such as the number of the - works inserted and executed since their creation. It can help - to evaluate the amount of work each of them has to perform. 
- For example it can help a developer to decide whether he should - choose a per-cpu workqueue instead of a singlethreaded one. - config BLK_DEV_IO_TRACE bool "Support for tracing block IO actions" depends on SYSFS diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8e3082b..f7ab703 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -33,8 +33,6 @@ #include #include #include -#define CREATE_TRACE_POINTS -#include /* * Structure fields follow one of the following exclusion rules. @@ -243,10 +241,10 @@ static inline void clear_wq_data(struct work_struct *work) atomic_long_set(&work->data, work_static(work)); } -static inline -struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) +static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) { - return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); + return (void *)(atomic_long_read(&work->data) & + WORK_STRUCT_WQ_DATA_MASK); } /** @@ -265,8 +263,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, unsigned int extra_flags) { - trace_workqueue_insertion(cwq->thread, work); - /* we own @work, set data and link */ set_wq_data(work, cwq, extra_flags); @@ -431,7 +427,6 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, struct lockdep_map lockdep_map = work->lockdep_map; #endif /* claim and process */ - trace_workqueue_execution(cwq->thread, work); debug_work_deactivate(work); cwq->current_work = work; list_del_init(&work->entry); @@ -1017,8 +1012,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) return PTR_ERR(p); cwq->thread = p; - trace_workqueue_creation(cwq->thread, cpu); - return 0; } @@ -1123,7 +1116,6 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) * checks list_empty(), and a "normal" queue_work() can't use * a dead CPU. */ - trace_workqueue_destruction(cwq->thread); kthread_stop(cwq->thread); cwq->thread = NULL; } -- cgit v1.1 From 1537663f5763892cacf1409ac0efef1b4f332d1e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: kill cpu_populated_map Worker management is about to be overhauled. Simplify things by removing cpu_populated_map, creating workers for all possible cpus and making single threaded workqueues behave more like multi threaded ones. After this patch, all cwqs are always initialized, all workqueues are linked on the workqueues list and workers for all possibles cpus always exist. This also makes CPU hotplug support simpler - checking ->cpus_allowed before processing works in worker_thread() and flushing cwqs on CPU_POST_DEAD are enough. While at it, make get_cwq() always return the cwq for the specified cpu, add target_cwq() for cases where single thread distinction is necessary and drop all direct usage of per_cpu_ptr() on wq->cpu_wq. 
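The get_cwq()/target_cwq() split mentioned above reduces to the two helpers below (copied in essence from the diff that follows): queueing paths go through target_cwq() so single-threaded workqueues still funnel everything to singlethread_cpu, while flush and cleanup paths, which now walk all possible CPUs, use get_cwq() directly.

	static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
						    struct workqueue_struct *wq)
	{
		return per_cpu_ptr(wq->cpu_wq, cpu);
	}

	static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
						       struct workqueue_struct *wq)
	{
		if (unlikely(wq->flags & WQ_SINGLE_THREAD))
			cpu = singlethread_cpu;
		return get_cwq(cpu, wq);
	}
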
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 173 ++++++++++++++++++----------------------------------- 1 file changed, 59 insertions(+), 114 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f7ab703..dc78956 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -55,6 +55,7 @@ struct cpu_workqueue_struct { struct list_head worklist; wait_queue_head_t more_work; struct work_struct *current_work; + unsigned int cpu; struct workqueue_struct *wq; /* I: the owning workqueue */ struct task_struct *thread; @@ -189,34 +190,19 @@ static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); static int singlethread_cpu __read_mostly; -static const struct cpumask *cpu_singlethread_map __read_mostly; -/* - * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD - * flushes cwq->worklist. This means that flush_workqueue/wait_on_work - * which comes in between can't use for_each_online_cpu(). We could - * use cpu_possible_map, the cpumask below is more a documentation - * than optimization. - */ -static cpumask_var_t cpu_populated_map __read_mostly; - -/* If it's single threaded, it isn't in the list of workqueues. */ -static inline bool is_wq_single_threaded(struct workqueue_struct *wq) -{ - return wq->flags & WQ_SINGLE_THREAD; -} -static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) +static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, + struct workqueue_struct *wq) { - return is_wq_single_threaded(wq) - ? cpu_singlethread_map : cpu_populated_map; + return per_cpu_ptr(wq->cpu_wq, cpu); } -static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, - struct workqueue_struct *wq) +static struct cpu_workqueue_struct *target_cwq(unsigned int cpu, + struct workqueue_struct *wq) { - if (unlikely(is_wq_single_threaded(wq))) + if (unlikely(wq->flags & WQ_SINGLE_THREAD)) cpu = singlethread_cpu; - return per_cpu_ptr(wq->cpu_wq, cpu); + return get_cwq(cpu, wq); } /* @@ -279,7 +265,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq); unsigned long flags; debug_work_activate(work); @@ -383,7 +369,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); /* This stores cwq for the moment, for the timer_fn */ - set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); + set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0); timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -495,6 +481,10 @@ static int worker_thread(void *__cwq) if (kthread_should_stop()) break; + if (unlikely(!cpumask_equal(&cwq->thread->cpus_allowed, + get_cpu_mask(cwq->cpu)))) + set_cpus_allowed_ptr(cwq->thread, + get_cpu_mask(cwq->cpu)); run_workqueue(cwq); } @@ -574,14 +564,13 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) */ void flush_workqueue(struct workqueue_struct *wq) { - const struct cpumask *cpu_map = wq_cpu_map(wq); int cpu; might_sleep(); lock_map_acquire(&wq->lockdep_map); lock_map_release(&wq->lockdep_map); - for_each_cpu(cpu, cpu_map) - flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); + for_each_possible_cpu(cpu) + flush_cpu_workqueue(get_cwq(cpu, wq)); } EXPORT_SYMBOL_GPL(flush_workqueue); @@ -699,7 +688,6 @@ static void wait_on_work(struct work_struct *work) { 
struct cpu_workqueue_struct *cwq; struct workqueue_struct *wq; - const struct cpumask *cpu_map; int cpu; might_sleep(); @@ -712,9 +700,8 @@ static void wait_on_work(struct work_struct *work) return; wq = cwq->wq; - cpu_map = wq_cpu_map(wq); - for_each_cpu(cpu, cpu_map) + for_each_possible_cpu(cpu) wait_on_cpu_work(get_cwq(cpu, wq), work); } @@ -972,7 +959,7 @@ int current_is_keventd(void) BUG_ON(!keventd_wq); - cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu); + cwq = get_cwq(cpu, keventd_wq); if (current == cwq->thread) ret = 1; @@ -980,26 +967,12 @@ int current_is_keventd(void) } -static struct cpu_workqueue_struct * -init_cpu_workqueue(struct workqueue_struct *wq, int cpu) -{ - struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); - - cwq->wq = wq; - spin_lock_init(&cwq->lock); - INIT_LIST_HEAD(&cwq->worklist); - init_waitqueue_head(&cwq->more_work); - - return cwq; -} - static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) { struct workqueue_struct *wq = cwq->wq; - const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d"; struct task_struct *p; - p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); + p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu); /* * Nobody can add the work_struct to this cwq, * if (caller is __create_workqueue) @@ -1031,8 +1004,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, struct lock_class_key *key, const char *lock_name) { + bool singlethread = flags & WQ_SINGLE_THREAD; struct workqueue_struct *wq; - struct cpu_workqueue_struct *cwq; int err = 0, cpu; wq = kzalloc(sizeof(*wq), GFP_KERNEL); @@ -1048,37 +1021,37 @@ struct workqueue_struct *__create_workqueue_key(const char *name, lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); - if (flags & WQ_SINGLE_THREAD) { - cwq = init_cpu_workqueue(wq, singlethread_cpu); - err = create_workqueue_thread(cwq, singlethread_cpu); - start_workqueue_thread(cwq, -1); - } else { - cpu_maps_update_begin(); - /* - * We must place this wq on list even if the code below fails. - * cpu_down(cpu) can remove cpu from cpu_populated_map before - * destroy_workqueue() takes the lock, in that case we leak - * cwq[cpu]->thread. - */ - spin_lock(&workqueue_lock); - list_add(&wq->list, &workqueues); - spin_unlock(&workqueue_lock); - /* - * We must initialize cwqs for each possible cpu even if we - * are going to call destroy_workqueue() finally. Otherwise - * cpu_up() can hit the uninitialized cwq once we drop the - * lock. - */ - for_each_possible_cpu(cpu) { - cwq = init_cpu_workqueue(wq, cpu); - if (err || !cpu_online(cpu)) - continue; - err = create_workqueue_thread(cwq, cpu); + cpu_maps_update_begin(); + /* + * We must initialize cwqs for each possible cpu even if we + * are going to call destroy_workqueue() finally. Otherwise + * cpu_up() can hit the uninitialized cwq once we drop the + * lock. 
+ */ + for_each_possible_cpu(cpu) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + cwq->wq = wq; + cwq->cpu = cpu; + spin_lock_init(&cwq->lock); + INIT_LIST_HEAD(&cwq->worklist); + init_waitqueue_head(&cwq->more_work); + + if (err) + continue; + err = create_workqueue_thread(cwq, cpu); + if (cpu_online(cpu) && !singlethread) start_workqueue_thread(cwq, cpu); - } - cpu_maps_update_done(); + else + start_workqueue_thread(cwq, -1); } + spin_lock(&workqueue_lock); + list_add(&wq->list, &workqueues); + spin_unlock(&workqueue_lock); + + cpu_maps_update_done(); + if (err) { destroy_workqueue(wq); wq = NULL; @@ -1128,17 +1101,16 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) */ void destroy_workqueue(struct workqueue_struct *wq) { - const struct cpumask *cpu_map = wq_cpu_map(wq); int cpu; cpu_maps_update_begin(); spin_lock(&workqueue_lock); list_del(&wq->list); spin_unlock(&workqueue_lock); + cpu_maps_update_done(); - for_each_cpu(cpu, cpu_map) - cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); - cpu_maps_update_done(); + for_each_possible_cpu(cpu) + cleanup_workqueue_thread(get_cwq(cpu, wq)); free_percpu(wq->cpu_wq); kfree(wq); @@ -1152,48 +1124,25 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, unsigned int cpu = (unsigned long)hcpu; struct cpu_workqueue_struct *cwq; struct workqueue_struct *wq; - int err = 0; action &= ~CPU_TASKS_FROZEN; - switch (action) { - case CPU_UP_PREPARE: - cpumask_set_cpu(cpu, cpu_populated_map); - } -undo: list_for_each_entry(wq, &workqueues, list) { - cwq = per_cpu_ptr(wq->cpu_wq, cpu); + if (wq->flags & WQ_SINGLE_THREAD) + continue; - switch (action) { - case CPU_UP_PREPARE: - err = create_workqueue_thread(cwq, cpu); - if (!err) - break; - printk(KERN_ERR "workqueue [%s] for %i failed\n", - wq->name, cpu); - action = CPU_UP_CANCELED; - err = -ENOMEM; - goto undo; - - case CPU_ONLINE: - start_workqueue_thread(cwq, cpu); - break; + cwq = get_cwq(cpu, wq); - case CPU_UP_CANCELED: - start_workqueue_thread(cwq, -1); + switch (action) { case CPU_POST_DEAD: - cleanup_workqueue_thread(cwq); + lock_map_acquire(&cwq->wq->lockdep_map); + lock_map_release(&cwq->wq->lockdep_map); + flush_cpu_workqueue(cwq); break; } } - switch (action) { - case CPU_UP_CANCELED: - case CPU_POST_DEAD: - cpumask_clear_cpu(cpu, cpu_populated_map); - } - - return notifier_from_errno(err); + return notifier_from_errno(0); } #ifdef CONFIG_SMP @@ -1245,11 +1194,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu); void __init init_workqueues(void) { - alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL); - - cpumask_copy(cpu_populated_map, cpu_online_mask); singlethread_cpu = cpumask_first(cpu_possible_mask); - cpu_singlethread_map = cpumask_of(singlethread_cpu); hotcpu_notifier(workqueue_cpu_callback, 0); keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); -- cgit v1.1 From 0f900049cbe2767d47c2a62b54f0e822e1d66840 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: update cwq alignement work->data field is used for two purposes. It points to cwq it's queued on and the lower bits are used for flags. Currently, two bits are reserved which is always safe as 4 byte alignment is guaranteed on every architecture. However, future changes will need more flag bits. On SMP, the percpu allocator is capable of honoring larger alignment (there are other users which depend on it) and larger alignment works just fine. 
On UP, percpu allocator is a thin wrapper around kzalloc/kfree() and don't honor alignment request. This patch introduces WORK_STRUCT_FLAG_BITS and implements alloc/free_cwqs() which guarantees max(1 << WORK_STRUCT_FLAG_BITS, __alignof__(unsigned long long) alignment both on SMP and UP. On SMP, simply wrapping percpu allocator is enough. On UP, extra space is allocated so that cwq can be aligned and the original pointer can be stored after it which is used in the free path. * Alignment problem on UP is reported by Michal Simek. Signed-off-by: Tejun Heo Cc: Christoph Lameter Cc: Ingo Molnar Cc: Frederic Weisbecker Reported-by: Michal Simek --- kernel/workqueue.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index dc78956..74a3849 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -46,7 +46,9 @@ /* * The per-CPU workqueue (if single thread, we always use the first - * possible cpu). + * possible cpu). The lower WORK_STRUCT_FLAG_BITS of + * work_struct->data are used for flags and thus cwqs need to be + * aligned at two's power of the number of flag bits. */ struct cpu_workqueue_struct { @@ -59,7 +61,7 @@ struct cpu_workqueue_struct { struct workqueue_struct *wq; /* I: the owning workqueue */ struct task_struct *thread; -} ____cacheline_aligned; +}; /* * The externally visible workqueue abstraction is an array of @@ -967,6 +969,53 @@ int current_is_keventd(void) } +static struct cpu_workqueue_struct *alloc_cwqs(void) +{ + /* + * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. + * Make sure that the alignment isn't lower than that of + * unsigned long long. + */ + const size_t size = sizeof(struct cpu_workqueue_struct); + const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, + __alignof__(unsigned long long)); + struct cpu_workqueue_struct *cwqs; +#ifndef CONFIG_SMP + void *ptr; + + /* + * On UP, percpu allocator doesn't honor alignment parameter + * and simply uses arch-dependent default. Allocate enough + * room to align cwq and put an extra pointer at the end + * pointing back to the originally allocated pointer which + * will be used for free. + * + * FIXME: This really belongs to UP percpu code. Update UP + * percpu code to honor alignment and remove this ugliness. 
+ */ + ptr = __alloc_percpu(size + align + sizeof(void *), 1); + cwqs = PTR_ALIGN(ptr, align); + *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr; +#else + /* On SMP, percpu allocator can do it itself */ + cwqs = __alloc_percpu(size, align); +#endif + /* just in case, make sure it's actually aligned */ + BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align)); + return cwqs; +} + +static void free_cwqs(struct cpu_workqueue_struct *cwqs) +{ +#ifndef CONFIG_SMP + /* on UP, the pointer to free is stored right after the cwq */ + if (cwqs) + free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0)); +#else + free_percpu(cwqs); +#endif +} + static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) { struct workqueue_struct *wq = cwq->wq; @@ -1012,7 +1061,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, if (!wq) goto err; - wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); + wq->cpu_wq = alloc_cwqs(); if (!wq->cpu_wq) goto err; @@ -1031,6 +1080,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); cwq->wq = wq; cwq->cpu = cpu; spin_lock_init(&cwq->lock); @@ -1059,7 +1109,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, return wq; err: if (wq) { - free_percpu(wq->cpu_wq); + free_cwqs(wq->cpu_wq); kfree(wq); } return NULL; @@ -1112,7 +1162,7 @@ void destroy_workqueue(struct workqueue_struct *wq) for_each_possible_cpu(cpu) cleanup_workqueue_thread(get_cwq(cpu, wq)); - free_percpu(wq->cpu_wq); + free_cwqs(wq->cpu_wq); kfree(wq); } EXPORT_SYMBOL_GPL(destroy_workqueue); -- cgit v1.1 From 73f53c4aa732eced5fcb1844d3d452c30905f20f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: reimplement workqueue flushing using color coded works Reimplement workqueue flushing using color coded works. wq has the current work color which is painted on the works being issued via cwqs. Flushing a workqueue is achieved by advancing the current work colors of cwqs and waiting for all the works which have any of the previous colors to drain. Currently there are 16 possible colors, one is reserved for no color and 15 colors are useable allowing 14 concurrent flushes. When color space gets full, flush attempts are batched up and processed together when color frees up, so even with many concurrent flushers, the new implementation won't build up huge queue of flushers which has to be processed one after another. Only works which are queued via __queue_work() are colored. Works which are directly put on queue using insert_work() use NO_COLOR and don't participate in workqueue flushing. Currently only works used for work-specific flush fall in this category. This new implementation leaves only cleanup_workqueue_thread() as the user of flush_cpu_workqueue(). Just make its users use flush_workqueue() and kthread_stop() directly and kill cleanup_workqueue_thread(). As workqueue flushing doesn't use barrier request anymore, the comment describing the complex synchronization around it in cleanup_workqueue_thread() is removed together with the function. This new implementation is to allow having and sharing multiple workers per cpu. Please note that one more bit is reserved for a future work flag by this patch. This is to avoid shifting bits and updating comments later. 
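The color bookkeeping itself is compact. The helpers below (lifted from the diff that follows) show how a work's color is packed into the flag bits of work->data when it is queued, and how the workqueue-wide color advances through the WORK_NR_COLORS space; each cwq then keeps a per-color nr_in_flight[] count, and a flusher waits until the counters for its flush color drain to zero.

	static unsigned int work_color_to_flags(int color)
	{
		return color << WORK_STRUCT_COLOR_SHIFT;
	}

	static int get_work_color(struct work_struct *work)
	{
		return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
			((1 << WORK_STRUCT_COLOR_BITS) - 1);
	}

	static int work_next_color(int color)
	{
		return (color + 1) % WORK_NR_COLORS;
	}
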
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 355 +++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 303 insertions(+), 52 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 74a3849..56e47c5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -41,6 +41,8 @@ * * L: cwq->lock protected. Access with cwq->lock held. * + * F: wq->flush_mutex protected. + * * W: workqueue_lock protected. */ @@ -60,10 +62,23 @@ struct cpu_workqueue_struct { unsigned int cpu; struct workqueue_struct *wq; /* I: the owning workqueue */ + int work_color; /* L: current color */ + int flush_color; /* L: flushing color */ + int nr_in_flight[WORK_NR_COLORS]; + /* L: nr of in_flight works */ struct task_struct *thread; }; /* + * Structure used to wait for workqueue flush. + */ +struct wq_flusher { + struct list_head list; /* F: list of flushers */ + int flush_color; /* F: flush color waiting for */ + struct completion done; /* flush completion */ +}; + +/* * The externally visible workqueue abstraction is an array of * per-CPU workqueues: */ @@ -71,6 +86,15 @@ struct workqueue_struct { unsigned int flags; /* I: WQ_* flags */ struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ struct list_head list; /* W: list of all workqueues */ + + struct mutex flush_mutex; /* protects wq flushing */ + int work_color; /* F: current work color */ + int flush_color; /* F: current flush color */ + atomic_t nr_cwqs_to_flush; /* flush in progress */ + struct wq_flusher *first_flusher; /* F: first flusher */ + struct list_head flusher_queue; /* F: flush waiters */ + struct list_head flusher_overflow; /* F: flush overflow list */ + const char *name; /* I: workqueue name */ #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; @@ -207,6 +231,22 @@ static struct cpu_workqueue_struct *target_cwq(unsigned int cpu, return get_cwq(cpu, wq); } +static unsigned int work_color_to_flags(int color) +{ + return color << WORK_STRUCT_COLOR_SHIFT; +} + +static int get_work_color(struct work_struct *work) +{ + return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & + ((1 << WORK_STRUCT_COLOR_BITS) - 1); +} + +static int work_next_color(int color) +{ + return (color + 1) % WORK_NR_COLORS; +} + /* * Set the workqueue on which a work item is to be run * - Must *only* be called if the pending flag is set @@ -273,7 +313,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, debug_work_activate(work); spin_lock_irqsave(&cwq->lock, flags); BUG_ON(!list_empty(&work->entry)); - insert_work(cwq, work, &cwq->worklist, 0); + cwq->nr_in_flight[cwq->work_color]++; + insert_work(cwq, work, &cwq->worklist, + work_color_to_flags(cwq->work_color)); spin_unlock_irqrestore(&cwq->lock, flags); } @@ -387,6 +429,44 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, EXPORT_SYMBOL_GPL(queue_delayed_work_on); /** + * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight + * @cwq: cwq of interest + * @color: color of work which left the queue + * + * A work either has completed or is removed from pending queue, + * decrement nr_in_flight of its cwq and handle workqueue flushing. + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ +static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) +{ + /* ignore uncolored works */ + if (color == WORK_NO_COLOR) + return; + + cwq->nr_in_flight[color]--; + + /* is flush in progress and are we at the flushing tip? */ + if (likely(cwq->flush_color != color)) + return; + + /* are there still in-flight works? 
*/ + if (cwq->nr_in_flight[color]) + return; + + /* this cwq is done, clear flush_color */ + cwq->flush_color = -1; + + /* + * If this was the last cwq, wake up the first flusher. It + * will handle the rest. + */ + if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) + complete(&cwq->wq->first_flusher->done); +} + +/** * process_one_work - process single work * @cwq: cwq to process work for * @work: work to process @@ -404,6 +484,7 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) { work_func_t f = work->func; + int work_color; #ifdef CONFIG_LOCKDEP /* * It is permissible to free the struct work_struct from @@ -417,6 +498,7 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, /* claim and process */ debug_work_deactivate(work); cwq->current_work = work; + work_color = get_work_color(work); list_del_init(&work->entry); spin_unlock_irq(&cwq->lock); @@ -443,6 +525,7 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, /* we're done with it, release */ cwq->current_work = NULL; + cwq_dec_nr_in_flight(cwq, work_color); } static void run_workqueue(struct cpu_workqueue_struct *cwq) @@ -529,29 +612,78 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, init_completion(&barr->done); debug_work_activate(&barr->work); - insert_work(cwq, &barr->work, head, 0); + insert_work(cwq, &barr->work, head, work_color_to_flags(WORK_NO_COLOR)); } -static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) +/** + * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing + * @wq: workqueue being flushed + * @flush_color: new flush color, < 0 for no-op + * @work_color: new work color, < 0 for no-op + * + * Prepare cwqs for workqueue flushing. + * + * If @flush_color is non-negative, flush_color on all cwqs should be + * -1. If no cwq has in-flight commands at the specified color, all + * cwq->flush_color's stay at -1 and %false is returned. If any cwq + * has in flight commands, its cwq->flush_color is set to + * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq + * wakeup logic is armed and %true is returned. + * + * The caller should have initialized @wq->first_flusher prior to + * calling this function with non-negative @flush_color. If + * @flush_color is negative, no flush color update is done and %false + * is returned. + * + * If @work_color is non-negative, all cwqs should have the same + * work_color which is previous to @work_color and all will be + * advanced to @work_color. + * + * CONTEXT: + * mutex_lock(wq->flush_mutex). + * + * RETURNS: + * %true if @flush_color >= 0 and there's something to flush. %false + * otherwise. 
+ */ +static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, + int flush_color, int work_color) { - int active = 0; - struct wq_barrier barr; + bool wait = false; + unsigned int cpu; - WARN_ON(cwq->thread == current); - - spin_lock_irq(&cwq->lock); - if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { - insert_wq_barrier(cwq, &barr, &cwq->worklist); - active = 1; + if (flush_color >= 0) { + BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); + atomic_set(&wq->nr_cwqs_to_flush, 1); } - spin_unlock_irq(&cwq->lock); - if (active) { - wait_for_completion(&barr.done); - destroy_work_on_stack(&barr.work); + for_each_possible_cpu(cpu) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + spin_lock_irq(&cwq->lock); + + if (flush_color >= 0) { + BUG_ON(cwq->flush_color != -1); + + if (cwq->nr_in_flight[flush_color]) { + cwq->flush_color = flush_color; + atomic_inc(&wq->nr_cwqs_to_flush); + wait = true; + } + } + + if (work_color >= 0) { + BUG_ON(work_color != work_next_color(cwq->work_color)); + cwq->work_color = work_color; + } + + spin_unlock_irq(&cwq->lock); } - return active; + if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) + complete(&wq->first_flusher->done); + + return wait; } /** @@ -566,13 +698,143 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) */ void flush_workqueue(struct workqueue_struct *wq) { - int cpu; + struct wq_flusher this_flusher = { + .list = LIST_HEAD_INIT(this_flusher.list), + .flush_color = -1, + .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), + }; + int next_color; - might_sleep(); lock_map_acquire(&wq->lockdep_map); lock_map_release(&wq->lockdep_map); - for_each_possible_cpu(cpu) - flush_cpu_workqueue(get_cwq(cpu, wq)); + + mutex_lock(&wq->flush_mutex); + + /* + * Start-to-wait phase + */ + next_color = work_next_color(wq->work_color); + + if (next_color != wq->flush_color) { + /* + * Color space is not full. The current work_color + * becomes our flush_color and work_color is advanced + * by one. + */ + BUG_ON(!list_empty(&wq->flusher_overflow)); + this_flusher.flush_color = wq->work_color; + wq->work_color = next_color; + + if (!wq->first_flusher) { + /* no flush in progress, become the first flusher */ + BUG_ON(wq->flush_color != this_flusher.flush_color); + + wq->first_flusher = &this_flusher; + + if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, + wq->work_color)) { + /* nothing to flush, done */ + wq->flush_color = next_color; + wq->first_flusher = NULL; + goto out_unlock; + } + } else { + /* wait in queue */ + BUG_ON(wq->flush_color == this_flusher.flush_color); + list_add_tail(&this_flusher.list, &wq->flusher_queue); + flush_workqueue_prep_cwqs(wq, -1, wq->work_color); + } + } else { + /* + * Oops, color space is full, wait on overflow queue. + * The next flush completion will assign us + * flush_color and transfer to flusher_queue. + */ + list_add_tail(&this_flusher.list, &wq->flusher_overflow); + } + + mutex_unlock(&wq->flush_mutex); + + wait_for_completion(&this_flusher.done); + + /* + * Wake-up-and-cascade phase + * + * First flushers are responsible for cascading flushes and + * handling overflow. Non-first flushers can simply return. 
+ */ + if (wq->first_flusher != &this_flusher) + return; + + mutex_lock(&wq->flush_mutex); + + wq->first_flusher = NULL; + + BUG_ON(!list_empty(&this_flusher.list)); + BUG_ON(wq->flush_color != this_flusher.flush_color); + + while (true) { + struct wq_flusher *next, *tmp; + + /* complete all the flushers sharing the current flush color */ + list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { + if (next->flush_color != wq->flush_color) + break; + list_del_init(&next->list); + complete(&next->done); + } + + BUG_ON(!list_empty(&wq->flusher_overflow) && + wq->flush_color != work_next_color(wq->work_color)); + + /* this flush_color is finished, advance by one */ + wq->flush_color = work_next_color(wq->flush_color); + + /* one color has been freed, handle overflow queue */ + if (!list_empty(&wq->flusher_overflow)) { + /* + * Assign the same color to all overflowed + * flushers, advance work_color and append to + * flusher_queue. This is the start-to-wait + * phase for these overflowed flushers. + */ + list_for_each_entry(tmp, &wq->flusher_overflow, list) + tmp->flush_color = wq->work_color; + + wq->work_color = work_next_color(wq->work_color); + + list_splice_tail_init(&wq->flusher_overflow, + &wq->flusher_queue); + flush_workqueue_prep_cwqs(wq, -1, wq->work_color); + } + + if (list_empty(&wq->flusher_queue)) { + BUG_ON(wq->flush_color != wq->work_color); + break; + } + + /* + * Need to flush more colors. Make the next flusher + * the new first flusher and arm cwqs. + */ + BUG_ON(wq->flush_color == wq->work_color); + BUG_ON(wq->flush_color != next->flush_color); + + list_del_init(&next->list); + wq->first_flusher = next; + + if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) + break; + + /* + * Meh... this color is already done, clear first + * flusher and repeat cascading. + */ + wq->first_flusher = NULL; + } + +out_unlock: + mutex_unlock(&wq->flush_mutex); } EXPORT_SYMBOL_GPL(flush_workqueue); @@ -659,6 +921,7 @@ static int try_to_grab_pending(struct work_struct *work) if (cwq == get_wq_data(work)) { debug_work_deactivate(work); list_del_init(&work->entry); + cwq_dec_nr_in_flight(cwq, get_work_color(work)); ret = 1; } } @@ -1066,6 +1329,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name, goto err; wq->flags = flags; + mutex_init(&wq->flush_mutex); + atomic_set(&wq->nr_cwqs_to_flush, 0); + INIT_LIST_HEAD(&wq->flusher_queue); + INIT_LIST_HEAD(&wq->flusher_overflow); wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); @@ -1083,6 +1350,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); cwq->wq = wq; cwq->cpu = cpu; + cwq->flush_color = -1; spin_lock_init(&cwq->lock); INIT_LIST_HEAD(&cwq->worklist); init_waitqueue_head(&cwq->more_work); @@ -1116,33 +1384,6 @@ err: } EXPORT_SYMBOL_GPL(__create_workqueue_key); -static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) -{ - /* - * Our caller is either destroy_workqueue() or CPU_POST_DEAD, - * cpu_add_remove_lock protects cwq->thread. - */ - if (cwq->thread == NULL) - return; - - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); - - flush_cpu_workqueue(cwq); - /* - * If the caller is CPU_POST_DEAD and cwq->worklist was not empty, - * a concurrent flush_workqueue() can insert a barrier after us. - * However, in that case run_workqueue() won't return and check - * kthread_should_stop() until it flushes all work_struct's. 
- * When ->worklist becomes empty it is safe to exit because no - * more work_structs can be queued on this cwq: flush_workqueue - * checks list_empty(), and a "normal" queue_work() can't use - * a dead CPU. - */ - kthread_stop(cwq->thread); - cwq->thread = NULL; -} - /** * destroy_workqueue - safely terminate a workqueue * @wq: target workqueue @@ -1159,8 +1400,20 @@ void destroy_workqueue(struct workqueue_struct *wq) spin_unlock(&workqueue_lock); cpu_maps_update_done(); - for_each_possible_cpu(cpu) - cleanup_workqueue_thread(get_cwq(cpu, wq)); + flush_workqueue(wq); + + for_each_possible_cpu(cpu) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + int i; + + if (cwq->thread) { + kthread_stop(cwq->thread); + cwq->thread = NULL; + } + + for (i = 0; i < WORK_NR_COLORS; i++) + BUG_ON(cwq->nr_in_flight[i]); + } free_cwqs(wq->cpu_wq); kfree(wq); @@ -1185,9 +1438,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_POST_DEAD: - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); - flush_cpu_workqueue(cwq); + flush_workqueue(wq); break; } } -- cgit v1.1 From c34056a3fdde777c079cc8a70785c2602f2586cb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: introduce worker Separate out worker thread related information to struct worker from struct cpu_workqueue_struct and implement helper functions to deal with the new struct worker. The only change which is visible outside is that now workqueue worker are all named "kworker/CPUID:WORKERID" where WORKERID is allocated from per-cpu ida. This is in preparation of concurrency managed workqueue where shared multiple workers would be available per cpu. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 211 +++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 150 insertions(+), 61 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 56e47c5..600db10 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -33,6 +33,7 @@ #include #include #include +#include /* * Structure fields follow one of the following exclusion rules. @@ -46,6 +47,15 @@ * W: workqueue_lock protected. */ +struct cpu_workqueue_struct; + +struct worker { + struct work_struct *current_work; /* L: work being processed */ + struct task_struct *task; /* I: worker task */ + struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ + int id; /* I: worker id */ +}; + /* * The per-CPU workqueue (if single thread, we always use the first * possible cpu). The lower WORK_STRUCT_FLAG_BITS of @@ -58,15 +68,14 @@ struct cpu_workqueue_struct { struct list_head worklist; wait_queue_head_t more_work; - struct work_struct *current_work; unsigned int cpu; + struct worker *worker; struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ int flush_color; /* L: flushing color */ int nr_in_flight[WORK_NR_COLORS]; /* L: nr of in_flight works */ - struct task_struct *thread; }; /* @@ -214,6 +223,9 @@ static inline void debug_work_deactivate(struct work_struct *work) { } /* Serializes the accesses to the list of workqueues. 
*/ static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); +static DEFINE_PER_CPU(struct ida, worker_ida); + +static int worker_thread(void *__worker); static int singlethread_cpu __read_mostly; @@ -428,6 +440,105 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, } EXPORT_SYMBOL_GPL(queue_delayed_work_on); +static struct worker *alloc_worker(void) +{ + struct worker *worker; + + worker = kzalloc(sizeof(*worker), GFP_KERNEL); + return worker; +} + +/** + * create_worker - create a new workqueue worker + * @cwq: cwq the new worker will belong to + * @bind: whether to set affinity to @cpu or not + * + * Create a new worker which is bound to @cwq. The returned worker + * can be started by calling start_worker() or destroyed using + * destroy_worker(). + * + * CONTEXT: + * Might sleep. Does GFP_KERNEL allocations. + * + * RETURNS: + * Pointer to the newly created worker. + */ +static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) +{ + int id = -1; + struct worker *worker = NULL; + + spin_lock(&workqueue_lock); + while (ida_get_new(&per_cpu(worker_ida, cwq->cpu), &id)) { + spin_unlock(&workqueue_lock); + if (!ida_pre_get(&per_cpu(worker_ida, cwq->cpu), GFP_KERNEL)) + goto fail; + spin_lock(&workqueue_lock); + } + spin_unlock(&workqueue_lock); + + worker = alloc_worker(); + if (!worker) + goto fail; + + worker->cwq = cwq; + worker->id = id; + + worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d", + cwq->cpu, id); + if (IS_ERR(worker->task)) + goto fail; + + if (bind) + kthread_bind(worker->task, cwq->cpu); + + return worker; +fail: + if (id >= 0) { + spin_lock(&workqueue_lock); + ida_remove(&per_cpu(worker_ida, cwq->cpu), id); + spin_unlock(&workqueue_lock); + } + kfree(worker); + return NULL; +} + +/** + * start_worker - start a newly created worker + * @worker: worker to start + * + * Start @worker. + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ +static void start_worker(struct worker *worker) +{ + wake_up_process(worker->task); +} + +/** + * destroy_worker - destroy a workqueue worker + * @worker: worker to be destroyed + * + * Destroy @worker. + */ +static void destroy_worker(struct worker *worker) +{ + int cpu = worker->cwq->cpu; + int id = worker->id; + + /* sanity check frenzy */ + BUG_ON(worker->current_work); + + kthread_stop(worker->task); + kfree(worker); + + spin_lock(&workqueue_lock); + ida_remove(&per_cpu(worker_ida, cpu), id); + spin_unlock(&workqueue_lock); +} + /** * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight * @cwq: cwq of interest @@ -468,7 +579,7 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) /** * process_one_work - process single work - * @cwq: cwq to process work for + * @worker: self * @work: work to process * * Process @work. This function contains all the logics necessary to @@ -480,9 +591,9 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) * CONTEXT: * spin_lock_irq(cwq->lock) which is released and regrabbed. 
*/ -static void process_one_work(struct cpu_workqueue_struct *cwq, - struct work_struct *work) +static void process_one_work(struct worker *worker, struct work_struct *work) { + struct cpu_workqueue_struct *cwq = worker->cwq; work_func_t f = work->func; int work_color; #ifdef CONFIG_LOCKDEP @@ -497,7 +608,7 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, #endif /* claim and process */ debug_work_deactivate(work); - cwq->current_work = work; + worker->current_work = work; work_color = get_work_color(work); list_del_init(&work->entry); @@ -524,30 +635,33 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, spin_lock_irq(&cwq->lock); /* we're done with it, release */ - cwq->current_work = NULL; + worker->current_work = NULL; cwq_dec_nr_in_flight(cwq, work_color); } -static void run_workqueue(struct cpu_workqueue_struct *cwq) +static void run_workqueue(struct worker *worker) { + struct cpu_workqueue_struct *cwq = worker->cwq; + spin_lock_irq(&cwq->lock); while (!list_empty(&cwq->worklist)) { struct work_struct *work = list_entry(cwq->worklist.next, struct work_struct, entry); - process_one_work(cwq, work); + process_one_work(worker, work); } spin_unlock_irq(&cwq->lock); } /** * worker_thread - the worker thread function - * @__cwq: cwq to serve + * @__worker: self * * The cwq worker thread function. */ -static int worker_thread(void *__cwq) +static int worker_thread(void *__worker) { - struct cpu_workqueue_struct *cwq = __cwq; + struct worker *worker = __worker; + struct cpu_workqueue_struct *cwq = worker->cwq; DEFINE_WAIT(wait); if (cwq->wq->flags & WQ_FREEZEABLE) @@ -566,11 +680,11 @@ static int worker_thread(void *__cwq) if (kthread_should_stop()) break; - if (unlikely(!cpumask_equal(&cwq->thread->cpus_allowed, + if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, get_cpu_mask(cwq->cpu)))) - set_cpus_allowed_ptr(cwq->thread, + set_cpus_allowed_ptr(worker->task, get_cpu_mask(cwq->cpu)); - run_workqueue(cwq); + run_workqueue(worker); } return 0; @@ -873,7 +987,7 @@ int flush_work(struct work_struct *work) goto already_gone; prev = &work->entry; } else { - if (cwq->current_work != work) + if (!cwq->worker || cwq->worker->current_work != work) goto already_gone; prev = &cwq->worklist; } @@ -937,7 +1051,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, int running = 0; spin_lock_irq(&cwq->lock); - if (unlikely(cwq->current_work == work)) { + if (unlikely(cwq->worker && cwq->worker->current_work == work)) { insert_wq_barrier(cwq, &barr, cwq->worklist.next); running = 1; } @@ -1225,7 +1339,7 @@ int current_is_keventd(void) BUG_ON(!keventd_wq); cwq = get_cwq(cpu, keventd_wq); - if (current == cwq->thread) + if (current == cwq->worker->task) ret = 1; return ret; @@ -1279,38 +1393,6 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs) #endif } -static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) -{ - struct workqueue_struct *wq = cwq->wq; - struct task_struct *p; - - p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu); - /* - * Nobody can add the work_struct to this cwq, - * if (caller is __create_workqueue) - * nobody should see this wq - * else // caller is CPU_UP_PREPARE - * cpu is not on cpu_online_map - * so we can abort safely. 
- */ - if (IS_ERR(p)) - return PTR_ERR(p); - cwq->thread = p; - - return 0; -} - -static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) -{ - struct task_struct *p = cwq->thread; - - if (p != NULL) { - if (cpu >= 0) - kthread_bind(p, cpu); - wake_up_process(p); - } -} - struct workqueue_struct *__create_workqueue_key(const char *name, unsigned int flags, struct lock_class_key *key, @@ -1318,7 +1400,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, { bool singlethread = flags & WQ_SINGLE_THREAD; struct workqueue_struct *wq; - int err = 0, cpu; + bool failed = false; + unsigned int cpu; wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) @@ -1348,20 +1431,21 @@ struct workqueue_struct *__create_workqueue_key(const char *name, struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); - cwq->wq = wq; cwq->cpu = cpu; + cwq->wq = wq; cwq->flush_color = -1; spin_lock_init(&cwq->lock); INIT_LIST_HEAD(&cwq->worklist); init_waitqueue_head(&cwq->more_work); - if (err) + if (failed) continue; - err = create_workqueue_thread(cwq, cpu); - if (cpu_online(cpu) && !singlethread) - start_workqueue_thread(cwq, cpu); + cwq->worker = create_worker(cwq, + cpu_online(cpu) && !singlethread); + if (cwq->worker) + start_worker(cwq->worker); else - start_workqueue_thread(cwq, -1); + failed = true; } spin_lock(&workqueue_lock); @@ -1370,7 +1454,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, cpu_maps_update_done(); - if (err) { + if (failed) { destroy_workqueue(wq); wq = NULL; } @@ -1406,9 +1490,9 @@ void destroy_workqueue(struct workqueue_struct *wq) struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); int i; - if (cwq->thread) { - kthread_stop(cwq->thread); - cwq->thread = NULL; + if (cwq->worker) { + destroy_worker(cwq->worker); + cwq->worker = NULL; } for (i = 0; i < WORK_NR_COLORS; i++) @@ -1495,6 +1579,11 @@ EXPORT_SYMBOL_GPL(work_on_cpu); void __init init_workqueues(void) { + unsigned int cpu; + + for_each_possible_cpu(cpu) + ida_init(&per_cpu(worker_ida, cpu)); + singlethread_cpu = cpumask_first(cpu_possible_mask); hotcpu_notifier(workqueue_cpu_callback, 0); keventd_wq = create_workqueue("events"); -- cgit v1.1 From affee4b294a0fc97d67c8a77dc080c4dd262a79e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: reimplement work flushing using linked works A work is linked to the next one by having WORK_STRUCT_LINKED bit set and these links can be chained. When a linked work is dispatched to a worker, all linked works are dispatched to the worker's newly added ->scheduled queue and processed back-to-back. Currently, as there's only single worker per cwq, having linked works doesn't make any visible behavior difference. This change is to prepare for multiple shared workers per cpu. 
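As a rough illustration of the dispatch rule described above, the following userspace sketch models a worklist whose entries carry a "linked" flag and moves a whole linked series to a worker's scheduled list in one pass, in the spirit of the move_linked_works() helper added in the diff below. The struct and the hand-rolled list handling are simplified stand-ins, not the kernel's list_head API.

#include <stdio.h>
#include <stdbool.h>

/* toy work item; 'linked' stands in for WORK_STRUCT_LINKED */
struct toy_work {
	const char *name;
	bool linked;            /* chained to the following work?    */
	struct toy_work *next;  /* stands in for list_head chaining  */
};

/*
 * Move the work at the head of @src, plus every consecutive work whose
 * predecessor has the linked flag set, to the tail of @dst.
 */
static void move_linked(struct toy_work **src, struct toy_work **dst)
{
	while (*src) {
		struct toy_work *w = *src;

		*src = w->next;
		w->next = NULL;

		/* append to tail of dst */
		while (*dst)
			dst = &(*dst)->next;
		*dst = w;

		if (!w->linked)	/* series ends at the first unlinked work */
			break;
	}
}

int main(void)
{
	struct toy_work c = { "c", false, NULL };
	struct toy_work b = { "b", false, &c };
	struct toy_work a = { "a", true,  &b };	/* a->b linked; c independent */
	struct toy_work *worklist = &a, *scheduled = NULL;

	move_linked(&worklist, &scheduled);

	for (struct toy_work *w = scheduled; w; w = w->next)
		printf("scheduled: %s\n", w->name);
	printf("worklist head now: %s\n", worklist ? worklist->name : "empty");
	return 0;
}

Here "a" and "b" are dispatched back-to-back onto the scheduled list while the unlinked "c" stays on the worklist, which is the behavior the worker loop below relies on.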
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 152 +++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 131 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 600db10..9953d3c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -51,6 +51,7 @@ struct cpu_workqueue_struct; struct worker { struct work_struct *current_work; /* L: work being processed */ + struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ int id; /* I: worker id */ @@ -445,6 +446,8 @@ static struct worker *alloc_worker(void) struct worker *worker; worker = kzalloc(sizeof(*worker), GFP_KERNEL); + if (worker) + INIT_LIST_HEAD(&worker->scheduled); return worker; } @@ -530,6 +533,7 @@ static void destroy_worker(struct worker *worker) /* sanity check frenzy */ BUG_ON(worker->current_work); + BUG_ON(!list_empty(&worker->scheduled)); kthread_stop(worker->task); kfree(worker); @@ -540,6 +544,47 @@ static void destroy_worker(struct worker *worker) } /** + * move_linked_works - move linked works to a list + * @work: start of series of works to be scheduled + * @head: target list to append @work to + * @nextp: out paramter for nested worklist walking + * + * Schedule linked works starting from @work to @head. Work series to + * be scheduled starts at @work and includes any consecutive work with + * WORK_STRUCT_LINKED set in its predecessor. + * + * If @nextp is not NULL, it's updated to point to the next work of + * the last scheduled work. This allows move_linked_works() to be + * nested inside outer list_for_each_entry_safe(). + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ +static void move_linked_works(struct work_struct *work, struct list_head *head, + struct work_struct **nextp) +{ + struct work_struct *n; + + /* + * Linked worklist will always end before the end of the list, + * use NULL for list head. + */ + list_for_each_entry_safe_from(work, n, NULL, entry) { + list_move_tail(&work->entry, head); + if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) + break; + } + + /* + * If we're already inside safe list traversal and have moved + * multiple works to the scheduled queue, the next position + * needs to be updated. + */ + if (nextp) + *nextp = n; +} + +/** * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight * @cwq: cwq of interest * @color: color of work which left the queue @@ -639,17 +684,25 @@ static void process_one_work(struct worker *worker, struct work_struct *work) cwq_dec_nr_in_flight(cwq, work_color); } -static void run_workqueue(struct worker *worker) +/** + * process_scheduled_works - process scheduled works + * @worker: self + * + * Process all scheduled works. Please note that the scheduled list + * may change while processing a work, so this function repeatedly + * fetches a work from the top and executes it. + * + * CONTEXT: + * spin_lock_irq(cwq->lock) which may be released and regrabbed + * multiple times. 
+ */ +static void process_scheduled_works(struct worker *worker) { - struct cpu_workqueue_struct *cwq = worker->cwq; - - spin_lock_irq(&cwq->lock); - while (!list_empty(&cwq->worklist)) { - struct work_struct *work = list_entry(cwq->worklist.next, + while (!list_empty(&worker->scheduled)) { + struct work_struct *work = list_first_entry(&worker->scheduled, struct work_struct, entry); process_one_work(worker, work); } - spin_unlock_irq(&cwq->lock); } /** @@ -684,7 +737,28 @@ static int worker_thread(void *__worker) get_cpu_mask(cwq->cpu)))) set_cpus_allowed_ptr(worker->task, get_cpu_mask(cwq->cpu)); - run_workqueue(worker); + + spin_lock_irq(&cwq->lock); + + while (!list_empty(&cwq->worklist)) { + struct work_struct *work = + list_first_entry(&cwq->worklist, + struct work_struct, entry); + + if (likely(!(*work_data_bits(work) & + WORK_STRUCT_LINKED))) { + /* optimization path, not strictly necessary */ + process_one_work(worker, work); + if (unlikely(!list_empty(&worker->scheduled))) + process_scheduled_works(worker); + } else { + move_linked_works(work, &worker->scheduled, + NULL); + process_scheduled_works(worker); + } + } + + spin_unlock_irq(&cwq->lock); } return 0; @@ -705,16 +779,33 @@ static void wq_barrier_func(struct work_struct *work) * insert_wq_barrier - insert a barrier work * @cwq: cwq to insert barrier into * @barr: wq_barrier to insert - * @head: insertion point + * @target: target work to attach @barr to + * @worker: worker currently executing @target, NULL if @target is not executing * - * Insert barrier @barr into @cwq before @head. + * @barr is linked to @target such that @barr is completed only after + * @target finishes execution. Please note that the ordering + * guarantee is observed only with respect to @target and on the local + * cpu. + * + * Currently, a queued barrier can't be canceled. This is because + * try_to_grab_pending() can't determine whether the work to be + * grabbed is at the head of the queue and thus can't clear LINKED + * flag of the previous work while there must be a valid next work + * after a work with LINKED flag set. + * + * Note that when @worker is non-NULL, @target may be modified + * underneath us, so we can't reliably determine cwq from @target. * * CONTEXT: * spin_lock_irq(cwq->lock). */ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, - struct wq_barrier *barr, struct list_head *head) + struct wq_barrier *barr, + struct work_struct *target, struct worker *worker) { + struct list_head *head; + unsigned int linked = 0; + /* * debugobject calls are safe here even with cwq->lock locked * as we know for sure that this will not trigger any of the @@ -725,8 +816,24 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); init_completion(&barr->done); + /* + * If @target is currently being executed, schedule the + * barrier to the worker; otherwise, put it after @target. 
+ */ + if (worker) + head = worker->scheduled.next; + else { + unsigned long *bits = work_data_bits(target); + + head = target->entry.next; + /* there can already be other linked works, inherit and set */ + linked = *bits & WORK_STRUCT_LINKED; + __set_bit(WORK_STRUCT_LINKED_BIT, bits); + } + debug_work_activate(&barr->work); - insert_work(cwq, &barr->work, head, work_color_to_flags(WORK_NO_COLOR)); + insert_work(cwq, &barr->work, head, + work_color_to_flags(WORK_NO_COLOR) | linked); } /** @@ -964,8 +1071,8 @@ EXPORT_SYMBOL_GPL(flush_workqueue); */ int flush_work(struct work_struct *work) { + struct worker *worker = NULL; struct cpu_workqueue_struct *cwq; - struct list_head *prev; struct wq_barrier barr; might_sleep(); @@ -985,14 +1092,14 @@ int flush_work(struct work_struct *work) smp_rmb(); if (unlikely(cwq != get_wq_data(work))) goto already_gone; - prev = &work->entry; } else { - if (!cwq->worker || cwq->worker->current_work != work) + if (cwq->worker && cwq->worker->current_work == work) + worker = cwq->worker; + if (!worker) goto already_gone; - prev = &cwq->worklist; } - insert_wq_barrier(cwq, &barr, prev->next); + insert_wq_barrier(cwq, &barr, work, worker); spin_unlock_irq(&cwq->lock); wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); @@ -1048,16 +1155,19 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) { struct wq_barrier barr; - int running = 0; + struct worker *worker; spin_lock_irq(&cwq->lock); + + worker = NULL; if (unlikely(cwq->worker && cwq->worker->current_work == work)) { - insert_wq_barrier(cwq, &barr, cwq->worklist.next); - running = 1; + worker = cwq->worker; + insert_wq_barrier(cwq, &barr, work, worker); } + spin_unlock_irq(&cwq->lock); - if (unlikely(running)) { + if (unlikely(worker)) { wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); } -- cgit v1.1 From 1e19ffc63dbbaea7a7d1c63d99c38d3e5a4c7edf Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: implement per-cwq active work limit Add cwq->nr_active, cwq->max_active and cwq->delayed_work. nr_active counts the number of active works per cwq. A work is active if it's flushable (colored) and is on cwq's worklist. If nr_active reaches max_active, new works are queued on cwq->delayed_work and activated later as works on the cwq complete and decrement nr_active. cwq->max_active can be specified via the new @max_active parameter to __create_workqueue() and is set to 1 for all workqueues for now. As each cwq has only single worker now, this double queueing doesn't cause any behavior difference visible to its users. This will be used to reimplement freeze/thaw and implement shared worker pool. 
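The double queueing described above can be modeled with a minimal userspace sketch, assuming a fixed limit in place of cwq->max_active: works queue directly while the active count is below the limit, otherwise they park on a delayed list and are activated one at a time as active works complete. The array-based queues and names here are illustrative only; the patch itself uses cwq->worklist, cwq->delayed_works and cwq_activate_first_delayed().

#include <stdio.h>

#define MAX_ACTIVE   2	/* stands in for cwq->max_active (1 in this patch) */
#define QUEUE_DEPTH  8

/* toy model: work ids parked on either the active or the delayed queue */
static int active[QUEUE_DEPTH], delayed[QUEUE_DEPTH];
static int nr_active, nr_delayed;

static void queue_work(int id)
{
	if (nr_active < MAX_ACTIVE) {
		active[nr_active++] = id;	/* goes straight to the worklist */
		printf("work %d queued active\n", id);
	} else {
		delayed[nr_delayed++] = id;	/* parked on delayed_works */
		printf("work %d delayed (limit %d reached)\n", id, MAX_ACTIVE);
	}
}

static void complete_one(void)
{
	if (!nr_active)
		return;
	printf("work %d completed\n", active[0]);

	/* retire the oldest active work */
	for (int i = 1; i < nr_active; i++)
		active[i - 1] = active[i];
	nr_active--;

	/* one slot freed: activate the first delayed work, if any */
	if (nr_delayed && nr_active < MAX_ACTIVE) {
		int id = delayed[0];

		for (int i = 1; i < nr_delayed; i++)
			delayed[i - 1] = delayed[i];
		nr_delayed--;
		active[nr_active++] = id;
		printf("work %d activated from delayed list\n", id);
	}
}

int main(void)
{
	for (int id = 1; id <= 4; id++)
		queue_work(id);		/* 1,2 go active; 3,4 are delayed */
	complete_one();			/* 1 done, 3 activated            */
	complete_one();			/* 2 done, 4 activated            */
	return 0;
}

With max_active set to 1, as every workqueue is here, the delayed queue simply serializes works one after another, which is why this change has no behavior visible to current users.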
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9953d3c..e541b5d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -77,6 +77,9 @@ struct cpu_workqueue_struct { int flush_color; /* L: flushing color */ int nr_in_flight[WORK_NR_COLORS]; /* L: nr of in_flight works */ + int nr_active; /* L: nr of active works */ + int max_active; /* I: max active works */ + struct list_head delayed_works; /* L: delayed works */ }; /* @@ -321,14 +324,24 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq); + struct list_head *worklist; unsigned long flags; debug_work_activate(work); + spin_lock_irqsave(&cwq->lock, flags); BUG_ON(!list_empty(&work->entry)); + cwq->nr_in_flight[cwq->work_color]++; - insert_work(cwq, work, &cwq->worklist, - work_color_to_flags(cwq->work_color)); + + if (likely(cwq->nr_active < cwq->max_active)) { + cwq->nr_active++; + worklist = &cwq->worklist; + } else + worklist = &cwq->delayed_works; + + insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); + spin_unlock_irqrestore(&cwq->lock, flags); } @@ -584,6 +597,15 @@ static void move_linked_works(struct work_struct *work, struct list_head *head, *nextp = n; } +static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) +{ + struct work_struct *work = list_first_entry(&cwq->delayed_works, + struct work_struct, entry); + + move_linked_works(work, &cwq->worklist, NULL); + cwq->nr_active++; +} + /** * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight * @cwq: cwq of interest @@ -602,6 +624,12 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) return; cwq->nr_in_flight[color]--; + cwq->nr_active--; + + /* one down, submit a delayed one */ + if (!list_empty(&cwq->delayed_works) && + cwq->nr_active < cwq->max_active) + cwq_activate_first_delayed(cwq); /* is flush in progress and are we at the flushing tip? */ if (likely(cwq->flush_color != color)) @@ -1505,6 +1533,7 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs) struct workqueue_struct *__create_workqueue_key(const char *name, unsigned int flags, + int max_active, struct lock_class_key *key, const char *lock_name) { @@ -1513,6 +1542,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, bool failed = false; unsigned int cpu; + max_active = clamp_val(max_active, 1, INT_MAX); + wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) goto err; @@ -1544,8 +1575,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name, cwq->cpu = cpu; cwq->wq = wq; cwq->flush_color = -1; + cwq->max_active = max_active; spin_lock_init(&cwq->lock); INIT_LIST_HEAD(&cwq->worklist); + INIT_LIST_HEAD(&cwq->delayed_works); init_waitqueue_head(&cwq->more_work); if (failed) @@ -1607,6 +1640,8 @@ void destroy_workqueue(struct workqueue_struct *wq) for (i = 0; i < WORK_NR_COLORS; i++) BUG_ON(cwq->nr_in_flight[i]); + BUG_ON(cwq->nr_active); + BUG_ON(!list_empty(&cwq->delayed_works)); } free_cwqs(wq->cpu_wq); -- cgit v1.1 From a0a1a5fd4fb15ec61117c759fe9f5c16c53d9e9c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: reimplement workqueue freeze using max_active Currently, workqueue freezing is implemented by marking the worker freezeable and calling try_to_freeze() from dispatch loop. 
Reimplement it using cwq->limit so that the workqueue is frozen instead of the worker. * workqueue_struct->saved_max_active is added which stores the specified max_active on initialization. * On freeze, all cwq->max_active's are quenched to zero. Freezing is complete when nr_active on all cwqs reach zero. * On thaw, all cwq->max_active's are restored to wq->saved_max_active and the worklist is repopulated. This new implementation allows having single shared pool of workers per cpu. Signed-off-by: Tejun Heo --- kernel/power/process.c | 21 ++++++- kernel/workqueue.c | 163 ++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 172 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/power/process.c b/kernel/power/process.c index 71ae290..028a995 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -15,6 +15,7 @@ #include #include #include +#include /* * Timeout for stopping processes @@ -35,6 +36,7 @@ static int try_to_freeze_tasks(bool sig_only) struct task_struct *g, *p; unsigned long end_time; unsigned int todo; + bool wq_busy = false; struct timeval start, end; u64 elapsed_csecs64; unsigned int elapsed_csecs; @@ -42,6 +44,10 @@ static int try_to_freeze_tasks(bool sig_only) do_gettimeofday(&start); end_time = jiffies + TIMEOUT; + + if (!sig_only) + freeze_workqueues_begin(); + while (true) { todo = 0; read_lock(&tasklist_lock); @@ -63,6 +69,12 @@ static int try_to_freeze_tasks(bool sig_only) todo++; } while_each_thread(g, p); read_unlock(&tasklist_lock); + + if (!sig_only) { + wq_busy = freeze_workqueues_busy(); + todo += wq_busy; + } + if (!todo || time_after(jiffies, end_time)) break; @@ -86,8 +98,12 @@ static int try_to_freeze_tasks(bool sig_only) */ printk("\n"); printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " - "(%d tasks refusing to freeze):\n", - elapsed_csecs / 100, elapsed_csecs % 100, todo); + "(%d tasks refusing to freeze, wq_busy=%d):\n", + elapsed_csecs / 100, elapsed_csecs % 100, + todo - wq_busy, wq_busy); + + thaw_workqueues(); + read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); @@ -157,6 +173,7 @@ void thaw_processes(void) oom_killer_enable(); printk("Restarting tasks ... "); + thaw_workqueues(); thaw_tasks(true); thaw_tasks(false); schedule(); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e541b5d..4d059c5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -78,7 +78,7 @@ struct cpu_workqueue_struct { int nr_in_flight[WORK_NR_COLORS]; /* L: nr of in_flight works */ int nr_active; /* L: nr of active works */ - int max_active; /* I: max active works */ + int max_active; /* L: max active works */ struct list_head delayed_works; /* L: delayed works */ }; @@ -108,6 +108,7 @@ struct workqueue_struct { struct list_head flusher_queue; /* F: flush waiters */ struct list_head flusher_overflow; /* F: flush overflow list */ + int saved_max_active; /* I: saved cwq max_active */ const char *name; /* I: workqueue name */ #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; @@ -228,6 +229,7 @@ static inline void debug_work_deactivate(struct work_struct *work) { } static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); static DEFINE_PER_CPU(struct ida, worker_ida); +static bool workqueue_freezing; /* W: have wqs started freezing? 
*/ static int worker_thread(void *__worker); @@ -745,19 +747,13 @@ static int worker_thread(void *__worker) struct cpu_workqueue_struct *cwq = worker->cwq; DEFINE_WAIT(wait); - if (cwq->wq->flags & WQ_FREEZEABLE) - set_freezable(); - for (;;) { prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE); - if (!freezing(current) && - !kthread_should_stop() && + if (!kthread_should_stop() && list_empty(&cwq->worklist)) schedule(); finish_wait(&cwq->more_work, &wait); - try_to_freeze(); - if (kthread_should_stop()) break; @@ -1553,6 +1549,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, goto err; wq->flags = flags; + wq->saved_max_active = max_active; mutex_init(&wq->flush_mutex); atomic_set(&wq->nr_cwqs_to_flush, 0); INIT_LIST_HEAD(&wq->flusher_queue); @@ -1591,8 +1588,19 @@ struct workqueue_struct *__create_workqueue_key(const char *name, failed = true; } + /* + * workqueue_lock protects global freeze state and workqueues + * list. Grab it, set max_active accordingly and add the new + * workqueue to workqueues list. + */ spin_lock(&workqueue_lock); + + if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) + for_each_possible_cpu(cpu) + get_cwq(cpu, wq)->max_active = 0; + list_add(&wq->list, &workqueues); + spin_unlock(&workqueue_lock); cpu_maps_update_done(); @@ -1621,14 +1629,18 @@ void destroy_workqueue(struct workqueue_struct *wq) { int cpu; + flush_workqueue(wq); + + /* + * wq list is used to freeze wq, remove from list after + * flushing is complete in case freeze races us. + */ cpu_maps_update_begin(); spin_lock(&workqueue_lock); list_del(&wq->list); spin_unlock(&workqueue_lock); cpu_maps_update_done(); - flush_workqueue(wq); - for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); int i; @@ -1722,6 +1734,137 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) EXPORT_SYMBOL_GPL(work_on_cpu); #endif /* CONFIG_SMP */ +#ifdef CONFIG_FREEZER + +/** + * freeze_workqueues_begin - begin freezing workqueues + * + * Start freezing workqueues. After this function returns, all + * freezeable workqueues will queue new works to their frozen_works + * list instead of the cwq ones. + * + * CONTEXT: + * Grabs and releases workqueue_lock and cwq->lock's. + */ +void freeze_workqueues_begin(void) +{ + struct workqueue_struct *wq; + unsigned int cpu; + + spin_lock(&workqueue_lock); + + BUG_ON(workqueue_freezing); + workqueue_freezing = true; + + for_each_possible_cpu(cpu) { + list_for_each_entry(wq, &workqueues, list) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + spin_lock_irq(&cwq->lock); + + if (wq->flags & WQ_FREEZEABLE) + cwq->max_active = 0; + + spin_unlock_irq(&cwq->lock); + } + } + + spin_unlock(&workqueue_lock); +} + +/** + * freeze_workqueues_busy - are freezeable workqueues still busy? + * + * Check whether freezing is complete. This function must be called + * between freeze_workqueues_begin() and thaw_workqueues(). + * + * CONTEXT: + * Grabs and releases workqueue_lock. + * + * RETURNS: + * %true if some freezeable workqueues are still busy. %false if + * freezing is complete. + */ +bool freeze_workqueues_busy(void) +{ + struct workqueue_struct *wq; + unsigned int cpu; + bool busy = false; + + spin_lock(&workqueue_lock); + + BUG_ON(!workqueue_freezing); + + for_each_possible_cpu(cpu) { + /* + * nr_active is monotonically decreasing. It's safe + * to peek without lock. 
+ */ + list_for_each_entry(wq, &workqueues, list) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + if (!(wq->flags & WQ_FREEZEABLE)) + continue; + + BUG_ON(cwq->nr_active < 0); + if (cwq->nr_active) { + busy = true; + goto out_unlock; + } + } + } +out_unlock: + spin_unlock(&workqueue_lock); + return busy; +} + +/** + * thaw_workqueues - thaw workqueues + * + * Thaw workqueues. Normal queueing is restored and all collected + * frozen works are transferred to their respective cwq worklists. + * + * CONTEXT: + * Grabs and releases workqueue_lock and cwq->lock's. + */ +void thaw_workqueues(void) +{ + struct workqueue_struct *wq; + unsigned int cpu; + + spin_lock(&workqueue_lock); + + if (!workqueue_freezing) + goto out_unlock; + + for_each_possible_cpu(cpu) { + list_for_each_entry(wq, &workqueues, list) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + if (!(wq->flags & WQ_FREEZEABLE)) + continue; + + spin_lock_irq(&cwq->lock); + + /* restore max_active and repopulate worklist */ + cwq->max_active = wq->saved_max_active; + + while (!list_empty(&cwq->delayed_works) && + cwq->nr_active < cwq->max_active) + cwq_activate_first_delayed(cwq); + + wake_up(&cwq->more_work); + + spin_unlock_irq(&cwq->lock); + } + } + + workqueue_freezing = false; +out_unlock: + spin_unlock(&workqueue_lock); +} +#endif /* CONFIG_FREEZER */ + void __init init_workqueues(void) { unsigned int cpu; -- cgit v1.1 From 8b03ae3cde59af9facab7c831b4141515d5dbcc8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: introduce global cwq and unify cwq locks There is one gcwq (global cwq) per each cpu and all cwqs on an cpu point to it. A gcwq contains a lock to be used by all cwqs on the cpu and an ida to give IDs to workers belonging to the cpu. This patch introduces gcwq, moves worker_ida into gcwq and make all cwqs on the same cpu use the cpu's gcwq->lock instead of separate locks. gcwq->ida is now protected by gcwq->lock too. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 160 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 98 insertions(+), 62 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4d059c5..b043f57 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -40,38 +40,45 @@ * * I: Set during initialization and read-only afterwards. * - * L: cwq->lock protected. Access with cwq->lock held. + * L: gcwq->lock protected. Access with gcwq->lock held. * * F: wq->flush_mutex protected. * * W: workqueue_lock protected. */ +struct global_cwq; struct cpu_workqueue_struct; struct worker { struct work_struct *current_work; /* L: work being processed */ struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ + struct global_cwq *gcwq; /* I: the associated gcwq */ struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ int id; /* I: worker id */ }; /* + * Global per-cpu workqueue. + */ +struct global_cwq { + spinlock_t lock; /* the gcwq lock */ + unsigned int cpu; /* I: the associated cpu */ + struct ida worker_ida; /* L: for worker IDs */ +} ____cacheline_aligned_in_smp; + +/* * The per-CPU workqueue (if single thread, we always use the first * possible cpu). The lower WORK_STRUCT_FLAG_BITS of * work_struct->data are used for flags and thus cwqs need to be * aligned at two's power of the number of flag bits. 
*/ struct cpu_workqueue_struct { - - spinlock_t lock; - + struct global_cwq *gcwq; /* I: the associated gcwq */ struct list_head worklist; wait_queue_head_t more_work; - unsigned int cpu; struct worker *worker; - struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ int flush_color; /* L: flushing color */ @@ -228,13 +235,19 @@ static inline void debug_work_deactivate(struct work_struct *work) { } /* Serializes the accesses to the list of workqueues. */ static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); -static DEFINE_PER_CPU(struct ida, worker_ida); static bool workqueue_freezing; /* W: have wqs started freezing? */ +static DEFINE_PER_CPU(struct global_cwq, global_cwq); + static int worker_thread(void *__worker); static int singlethread_cpu __read_mostly; +static struct global_cwq *get_gcwq(unsigned int cpu) +{ + return &per_cpu(global_cwq, cpu); +} + static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, struct workqueue_struct *wq) { @@ -303,7 +316,7 @@ static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) * Insert @work into @cwq after @head. * * CONTEXT: - * spin_lock_irq(cwq->lock). + * spin_lock_irq(gcwq->lock). */ static void insert_work(struct cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, @@ -326,12 +339,13 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq); + struct global_cwq *gcwq = cwq->gcwq; struct list_head *worklist; unsigned long flags; debug_work_activate(work); - spin_lock_irqsave(&cwq->lock, flags); + spin_lock_irqsave(&gcwq->lock, flags); BUG_ON(!list_empty(&work->entry)); cwq->nr_in_flight[cwq->work_color]++; @@ -344,7 +358,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); - spin_unlock_irqrestore(&cwq->lock, flags); + spin_unlock_irqrestore(&gcwq->lock, flags); } /** @@ -483,39 +497,41 @@ static struct worker *alloc_worker(void) */ static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) { + struct global_cwq *gcwq = cwq->gcwq; int id = -1; struct worker *worker = NULL; - spin_lock(&workqueue_lock); - while (ida_get_new(&per_cpu(worker_ida, cwq->cpu), &id)) { - spin_unlock(&workqueue_lock); - if (!ida_pre_get(&per_cpu(worker_ida, cwq->cpu), GFP_KERNEL)) + spin_lock_irq(&gcwq->lock); + while (ida_get_new(&gcwq->worker_ida, &id)) { + spin_unlock_irq(&gcwq->lock); + if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL)) goto fail; - spin_lock(&workqueue_lock); + spin_lock_irq(&gcwq->lock); } - spin_unlock(&workqueue_lock); + spin_unlock_irq(&gcwq->lock); worker = alloc_worker(); if (!worker) goto fail; + worker->gcwq = gcwq; worker->cwq = cwq; worker->id = id; worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d", - cwq->cpu, id); + gcwq->cpu, id); if (IS_ERR(worker->task)) goto fail; if (bind) - kthread_bind(worker->task, cwq->cpu); + kthread_bind(worker->task, gcwq->cpu); return worker; fail: if (id >= 0) { - spin_lock(&workqueue_lock); - ida_remove(&per_cpu(worker_ida, cwq->cpu), id); - spin_unlock(&workqueue_lock); + spin_lock_irq(&gcwq->lock); + ida_remove(&gcwq->worker_ida, id); + spin_unlock_irq(&gcwq->lock); } kfree(worker); return NULL; @@ -528,7 +544,7 @@ fail: * Start @worker. * * CONTEXT: - * spin_lock_irq(cwq->lock). + * spin_lock_irq(gcwq->lock). 
*/ static void start_worker(struct worker *worker) { @@ -543,7 +559,7 @@ static void start_worker(struct worker *worker) */ static void destroy_worker(struct worker *worker) { - int cpu = worker->cwq->cpu; + struct global_cwq *gcwq = worker->gcwq; int id = worker->id; /* sanity check frenzy */ @@ -553,9 +569,9 @@ static void destroy_worker(struct worker *worker) kthread_stop(worker->task); kfree(worker); - spin_lock(&workqueue_lock); - ida_remove(&per_cpu(worker_ida, cpu), id); - spin_unlock(&workqueue_lock); + spin_lock_irq(&gcwq->lock); + ida_remove(&gcwq->worker_ida, id); + spin_unlock_irq(&gcwq->lock); } /** @@ -573,7 +589,7 @@ static void destroy_worker(struct worker *worker) * nested inside outer list_for_each_entry_safe(). * * CONTEXT: - * spin_lock_irq(cwq->lock). + * spin_lock_irq(gcwq->lock). */ static void move_linked_works(struct work_struct *work, struct list_head *head, struct work_struct **nextp) @@ -617,7 +633,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) * decrement nr_in_flight of its cwq and handle workqueue flushing. * * CONTEXT: - * spin_lock_irq(cwq->lock). + * spin_lock_irq(gcwq->lock). */ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) { @@ -664,11 +680,12 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) * call this function to process a work. * * CONTEXT: - * spin_lock_irq(cwq->lock) which is released and regrabbed. + * spin_lock_irq(gcwq->lock) which is released and regrabbed. */ static void process_one_work(struct worker *worker, struct work_struct *work) { struct cpu_workqueue_struct *cwq = worker->cwq; + struct global_cwq *gcwq = cwq->gcwq; work_func_t f = work->func; int work_color; #ifdef CONFIG_LOCKDEP @@ -687,7 +704,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) work_color = get_work_color(work); list_del_init(&work->entry); - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); BUG_ON(get_wq_data(work) != cwq); work_clear_pending(work); @@ -707,7 +724,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) dump_stack(); } - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); /* we're done with it, release */ worker->current_work = NULL; @@ -723,7 +740,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) * fetches a work from the top and executes it. * * CONTEXT: - * spin_lock_irq(cwq->lock) which may be released and regrabbed + * spin_lock_irq(gcwq->lock) which may be released and regrabbed * multiple times. 
*/ static void process_scheduled_works(struct worker *worker) @@ -744,6 +761,7 @@ static void process_scheduled_works(struct worker *worker) static int worker_thread(void *__worker) { struct worker *worker = __worker; + struct global_cwq *gcwq = worker->gcwq; struct cpu_workqueue_struct *cwq = worker->cwq; DEFINE_WAIT(wait); @@ -758,11 +776,11 @@ static int worker_thread(void *__worker) break; if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, - get_cpu_mask(cwq->cpu)))) + get_cpu_mask(gcwq->cpu)))) set_cpus_allowed_ptr(worker->task, - get_cpu_mask(cwq->cpu)); + get_cpu_mask(gcwq->cpu)); - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); while (!list_empty(&cwq->worklist)) { struct work_struct *work = @@ -782,7 +800,7 @@ static int worker_thread(void *__worker) } } - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); } return 0; @@ -821,7 +839,7 @@ static void wq_barrier_func(struct work_struct *work) * underneath us, so we can't reliably determine cwq from @target. * * CONTEXT: - * spin_lock_irq(cwq->lock). + * spin_lock_irq(gcwq->lock). */ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, struct wq_barrier *barr, @@ -831,7 +849,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, unsigned int linked = 0; /* - * debugobject calls are safe here even with cwq->lock locked + * debugobject calls are safe here even with gcwq->lock locked * as we know for sure that this will not trigger any of the * checks and call back into the fixup functions where we * might deadlock. @@ -904,8 +922,9 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct global_cwq *gcwq = cwq->gcwq; - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); if (flush_color >= 0) { BUG_ON(cwq->flush_color != -1); @@ -922,7 +941,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, cwq->work_color = work_color; } - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); } if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) @@ -1097,17 +1116,19 @@ int flush_work(struct work_struct *work) { struct worker *worker = NULL; struct cpu_workqueue_struct *cwq; + struct global_cwq *gcwq; struct wq_barrier barr; might_sleep(); cwq = get_wq_data(work); if (!cwq) return 0; + gcwq = cwq->gcwq; lock_map_acquire(&cwq->wq->lockdep_map); lock_map_release(&cwq->wq->lockdep_map); - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); if (!list_empty(&work->entry)) { /* * See the comment near try_to_grab_pending()->smp_rmb(). @@ -1124,12 +1145,12 @@ int flush_work(struct work_struct *work) } insert_wq_barrier(cwq, &barr, work, worker); - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); return 1; already_gone: - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); return 0; } EXPORT_SYMBOL_GPL(flush_work); @@ -1140,6 +1161,7 @@ EXPORT_SYMBOL_GPL(flush_work); */ static int try_to_grab_pending(struct work_struct *work) { + struct global_cwq *gcwq; struct cpu_workqueue_struct *cwq; int ret = -1; @@ -1154,8 +1176,9 @@ static int try_to_grab_pending(struct work_struct *work) cwq = get_wq_data(work); if (!cwq) return ret; + gcwq = cwq->gcwq; - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); if (!list_empty(&work->entry)) { /* * This work is queued, but perhaps we locked the wrong cwq. 
@@ -1170,7 +1193,7 @@ static int try_to_grab_pending(struct work_struct *work) ret = 1; } } - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); return ret; } @@ -1178,10 +1201,11 @@ static int try_to_grab_pending(struct work_struct *work) static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) { + struct global_cwq *gcwq = cwq->gcwq; struct wq_barrier barr; struct worker *worker; - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); worker = NULL; if (unlikely(cwq->worker && cwq->worker->current_work == work)) { @@ -1189,7 +1213,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, insert_wq_barrier(cwq, &barr, work, worker); } - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); if (unlikely(worker)) { wait_for_completion(&barr.done); @@ -1567,13 +1591,13 @@ struct workqueue_struct *__create_workqueue_key(const char *name, */ for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct global_cwq *gcwq = get_gcwq(cpu); BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); - cwq->cpu = cpu; + cwq->gcwq = gcwq; cwq->wq = wq; cwq->flush_color = -1; cwq->max_active = max_active; - spin_lock_init(&cwq->lock); INIT_LIST_HEAD(&cwq->worklist); INIT_LIST_HEAD(&cwq->delayed_works); init_waitqueue_head(&cwq->more_work); @@ -1744,7 +1768,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu); * list instead of the cwq ones. * * CONTEXT: - * Grabs and releases workqueue_lock and cwq->lock's. + * Grabs and releases workqueue_lock and gcwq->lock's. */ void freeze_workqueues_begin(void) { @@ -1757,16 +1781,18 @@ void freeze_workqueues_begin(void) workqueue_freezing = true; for_each_possible_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + + spin_lock_irq(&gcwq->lock); + list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - spin_lock_irq(&cwq->lock); - if (wq->flags & WQ_FREEZEABLE) cwq->max_active = 0; - - spin_unlock_irq(&cwq->lock); } + + spin_unlock_irq(&gcwq->lock); } spin_unlock(&workqueue_lock); @@ -1825,7 +1851,7 @@ out_unlock: * frozen works are transferred to their respective cwq worklists. * * CONTEXT: - * Grabs and releases workqueue_lock and cwq->lock's. + * Grabs and releases workqueue_lock and gcwq->lock's. 
*/ void thaw_workqueues(void) { @@ -1838,14 +1864,16 @@ void thaw_workqueues(void) goto out_unlock; for_each_possible_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + + spin_lock_irq(&gcwq->lock); + list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); if (!(wq->flags & WQ_FREEZEABLE)) continue; - spin_lock_irq(&cwq->lock); - /* restore max_active and repopulate worklist */ cwq->max_active = wq->saved_max_active; @@ -1854,9 +1882,9 @@ void thaw_workqueues(void) cwq_activate_first_delayed(cwq); wake_up(&cwq->more_work); - - spin_unlock_irq(&cwq->lock); } + + spin_unlock_irq(&gcwq->lock); } workqueue_freezing = false; @@ -1869,11 +1897,19 @@ void __init init_workqueues(void) { unsigned int cpu; - for_each_possible_cpu(cpu) - ida_init(&per_cpu(worker_ida, cpu)); - singlethread_cpu = cpumask_first(cpu_possible_mask); hotcpu_notifier(workqueue_cpu_callback, 0); + + /* initialize gcwqs */ + for_each_possible_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + + spin_lock_init(&gcwq->lock); + gcwq->cpu = cpu; + + ida_init(&gcwq->worker_ida); + } + keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); } -- cgit v1.1 From c8e55f360210c1bc49bea5d62bc3939b7ee13483 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: implement worker states Implement worker states. After created, a worker is STARTED. While a worker isn't processing a work, it's IDLE and chained on gcwq->idle_list. While processing a work, a worker is BUSY and chained on gcwq->busy_hash. Also, gcwq now counts the number of all workers and idle ones. worker_thread() is restructured to reflect state transitions. cwq->more_work is removed and waking up a worker makes it check for events. A worker is killed by setting DIE flag while it's IDLE and waking it up. This gives gcwq better visibility of what's going on and allows it to find out whether a work is executing quickly which is necessary to have multiple workers processing the same cwq. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 214 +++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 173 insertions(+), 41 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b043f57..d64913a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -35,6 +35,17 @@ #include #include +enum { + /* worker flags */ + WORKER_STARTED = 1 << 0, /* started */ + WORKER_DIE = 1 << 1, /* die die die */ + WORKER_IDLE = 1 << 2, /* is idle */ + + BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ + BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER, + BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1, +}; + /* * Structure fields follow one of the following exclusion rules. 
* @@ -51,11 +62,18 @@ struct global_cwq; struct cpu_workqueue_struct; struct worker { + /* on idle list while idle, on busy hash table while busy */ + union { + struct list_head entry; /* L: while idle */ + struct hlist_node hentry; /* L: while busy */ + }; + struct work_struct *current_work; /* L: work being processed */ struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ struct global_cwq *gcwq; /* I: the associated gcwq */ struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ + unsigned int flags; /* L: flags */ int id; /* I: worker id */ }; @@ -65,6 +83,15 @@ struct worker { struct global_cwq { spinlock_t lock; /* the gcwq lock */ unsigned int cpu; /* I: the associated cpu */ + + int nr_workers; /* L: total number of workers */ + int nr_idle; /* L: currently idle ones */ + + /* workers are chained either in the idle_list or busy_hash */ + struct list_head idle_list; /* L: list of idle workers */ + struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE]; + /* L: hash of busy workers */ + struct ida worker_ida; /* L: for worker IDs */ } ____cacheline_aligned_in_smp; @@ -77,7 +104,6 @@ struct global_cwq { struct cpu_workqueue_struct { struct global_cwq *gcwq; /* I: the associated gcwq */ struct list_head worklist; - wait_queue_head_t more_work; struct worker *worker; struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ @@ -307,6 +333,33 @@ static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) } /** + * busy_worker_head - return the busy hash head for a work + * @gcwq: gcwq of interest + * @work: work to be hashed + * + * Return hash head of @gcwq for @work. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + * + * RETURNS: + * Pointer to the hash head. + */ +static struct hlist_head *busy_worker_head(struct global_cwq *gcwq, + struct work_struct *work) +{ + const int base_shift = ilog2(sizeof(struct work_struct)); + unsigned long v = (unsigned long)work; + + /* simple shift and fold hash, do we need something better? */ + v >>= base_shift; + v += v >> BUSY_WORKER_HASH_ORDER; + v &= BUSY_WORKER_HASH_MASK; + + return &gcwq->busy_hash[v]; +} + +/** * insert_work - insert a work into cwq * @cwq: cwq @work belongs to * @work: work to insert @@ -332,7 +385,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, smp_wmb(); list_add_tail(&work->entry, head); - wake_up(&cwq->more_work); + wake_up_process(cwq->worker->task); } static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, @@ -470,13 +523,59 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, } EXPORT_SYMBOL_GPL(queue_delayed_work_on); +/** + * worker_enter_idle - enter idle state + * @worker: worker which is entering idle state + * + * @worker is entering idle state. Update stats and idle timer if + * necessary. + * + * LOCKING: + * spin_lock_irq(gcwq->lock). + */ +static void worker_enter_idle(struct worker *worker) +{ + struct global_cwq *gcwq = worker->gcwq; + + BUG_ON(worker->flags & WORKER_IDLE); + BUG_ON(!list_empty(&worker->entry) && + (worker->hentry.next || worker->hentry.pprev)); + + worker->flags |= WORKER_IDLE; + gcwq->nr_idle++; + + /* idle_list is LIFO */ + list_add(&worker->entry, &gcwq->idle_list); +} + +/** + * worker_leave_idle - leave idle state + * @worker: worker which is leaving idle state + * + * @worker is leaving idle state. Update stats. + * + * LOCKING: + * spin_lock_irq(gcwq->lock). 
+ */ +static void worker_leave_idle(struct worker *worker) +{ + struct global_cwq *gcwq = worker->gcwq; + + BUG_ON(!(worker->flags & WORKER_IDLE)); + worker->flags &= ~WORKER_IDLE; + gcwq->nr_idle--; + list_del_init(&worker->entry); +} + static struct worker *alloc_worker(void) { struct worker *worker; worker = kzalloc(sizeof(*worker), GFP_KERNEL); - if (worker) + if (worker) { + INIT_LIST_HEAD(&worker->entry); INIT_LIST_HEAD(&worker->scheduled); + } return worker; } @@ -541,13 +640,16 @@ fail: * start_worker - start a newly created worker * @worker: worker to start * - * Start @worker. + * Make the gcwq aware of @worker and start it. * * CONTEXT: * spin_lock_irq(gcwq->lock). */ static void start_worker(struct worker *worker) { + worker->flags |= WORKER_STARTED; + worker->gcwq->nr_workers++; + worker_enter_idle(worker); wake_up_process(worker->task); } @@ -555,7 +657,10 @@ static void start_worker(struct worker *worker) * destroy_worker - destroy a workqueue worker * @worker: worker to be destroyed * - * Destroy @worker. + * Destroy @worker and adjust @gcwq stats accordingly. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock) which is released and regrabbed. */ static void destroy_worker(struct worker *worker) { @@ -566,12 +671,21 @@ static void destroy_worker(struct worker *worker) BUG_ON(worker->current_work); BUG_ON(!list_empty(&worker->scheduled)); + if (worker->flags & WORKER_STARTED) + gcwq->nr_workers--; + if (worker->flags & WORKER_IDLE) + gcwq->nr_idle--; + + list_del_init(&worker->entry); + worker->flags |= WORKER_DIE; + + spin_unlock_irq(&gcwq->lock); + kthread_stop(worker->task); kfree(worker); spin_lock_irq(&gcwq->lock); ida_remove(&gcwq->worker_ida, id); - spin_unlock_irq(&gcwq->lock); } /** @@ -686,6 +800,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) { struct cpu_workqueue_struct *cwq = worker->cwq; struct global_cwq *gcwq = cwq->gcwq; + struct hlist_head *bwh = busy_worker_head(gcwq, work); work_func_t f = work->func; int work_color; #ifdef CONFIG_LOCKDEP @@ -700,6 +815,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) #endif /* claim and process */ debug_work_deactivate(work); + hlist_add_head(&worker->hentry, bwh); worker->current_work = work; work_color = get_work_color(work); list_del_init(&work->entry); @@ -727,6 +843,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) spin_lock_irq(&gcwq->lock); /* we're done with it, release */ + hlist_del_init(&worker->hentry); worker->current_work = NULL; cwq_dec_nr_in_flight(cwq, work_color); } @@ -763,47 +880,56 @@ static int worker_thread(void *__worker) struct worker *worker = __worker; struct global_cwq *gcwq = worker->gcwq; struct cpu_workqueue_struct *cwq = worker->cwq; - DEFINE_WAIT(wait); - for (;;) { - prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE); - if (!kthread_should_stop() && - list_empty(&cwq->worklist)) - schedule(); - finish_wait(&cwq->more_work, &wait); +woke_up: + if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, + get_cpu_mask(gcwq->cpu)))) + set_cpus_allowed_ptr(worker->task, get_cpu_mask(gcwq->cpu)); - if (kthread_should_stop()) - break; + spin_lock_irq(&gcwq->lock); - if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, - get_cpu_mask(gcwq->cpu)))) - set_cpus_allowed_ptr(worker->task, - get_cpu_mask(gcwq->cpu)); + /* DIE can be set only while we're idle, checking here is enough */ + if (worker->flags & WORKER_DIE) { + spin_unlock_irq(&gcwq->lock); + return 0; + } - 
spin_lock_irq(&gcwq->lock); + worker_leave_idle(worker); - while (!list_empty(&cwq->worklist)) { - struct work_struct *work = - list_first_entry(&cwq->worklist, - struct work_struct, entry); - - if (likely(!(*work_data_bits(work) & - WORK_STRUCT_LINKED))) { - /* optimization path, not strictly necessary */ - process_one_work(worker, work); - if (unlikely(!list_empty(&worker->scheduled))) - process_scheduled_works(worker); - } else { - move_linked_works(work, &worker->scheduled, - NULL); + /* + * ->scheduled list can only be filled while a worker is + * preparing to process a work or actually processing it. + * Make sure nobody diddled with it while I was sleeping. + */ + BUG_ON(!list_empty(&worker->scheduled)); + + while (!list_empty(&cwq->worklist)) { + struct work_struct *work = + list_first_entry(&cwq->worklist, + struct work_struct, entry); + + if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { + /* optimization path, not strictly necessary */ + process_one_work(worker, work); + if (unlikely(!list_empty(&worker->scheduled))) process_scheduled_works(worker); - } + } else { + move_linked_works(work, &worker->scheduled, NULL); + process_scheduled_works(worker); } - - spin_unlock_irq(&gcwq->lock); } - return 0; + /* + * gcwq->lock is held and there's no work to process, sleep. + * Workers are woken up only while holding gcwq->lock, so + * setting the current state before releasing gcwq->lock is + * enough to prevent losing any event. + */ + worker_enter_idle(worker); + __set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irq(&gcwq->lock); + schedule(); + goto woke_up; } struct wq_barrier { @@ -1600,7 +1726,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, cwq->max_active = max_active; INIT_LIST_HEAD(&cwq->worklist); INIT_LIST_HEAD(&cwq->delayed_works); - init_waitqueue_head(&cwq->more_work); if (failed) continue; @@ -1651,7 +1776,7 @@ EXPORT_SYMBOL_GPL(__create_workqueue_key); */ void destroy_workqueue(struct workqueue_struct *wq) { - int cpu; + unsigned int cpu; flush_workqueue(wq); @@ -1670,8 +1795,10 @@ void destroy_workqueue(struct workqueue_struct *wq) int i; if (cwq->worker) { + spin_lock_irq(&cwq->gcwq->lock); destroy_worker(cwq->worker); cwq->worker = NULL; + spin_unlock_irq(&cwq->gcwq->lock); } for (i = 0; i < WORK_NR_COLORS; i++) @@ -1881,7 +2008,7 @@ void thaw_workqueues(void) cwq->nr_active < cwq->max_active) cwq_activate_first_delayed(cwq); - wake_up(&cwq->more_work); + wake_up_process(cwq->worker->task); } spin_unlock_irq(&gcwq->lock); @@ -1896,6 +2023,7 @@ out_unlock: void __init init_workqueues(void) { unsigned int cpu; + int i; singlethread_cpu = cpumask_first(cpu_possible_mask); hotcpu_notifier(workqueue_cpu_callback, 0); @@ -1907,6 +2035,10 @@ void __init init_workqueues(void) spin_lock_init(&gcwq->lock); gcwq->cpu = cpu; + INIT_LIST_HEAD(&gcwq->idle_list); + for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) + INIT_HLIST_HEAD(&gcwq->busy_hash[i]); + ida_init(&gcwq->worker_ida); } -- cgit v1.1 From db7bccf45cb87522096b8f43144e31ca605a9f24 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: reimplement CPU hotplugging support using trustee Reimplement CPU hotplugging support using trustee thread. On CPU down, a trustee thread is created and each step of CPU down is executed by the trustee and workqueue_cpu_callback() simply drives and waits for trustee state transitions. CPU down operation no longer waits for works to be drained but trustee sticks around till all pending works have been completed. 
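The notifier/trustee handshake this and the following paragraph describe works roughly as sketched below. This is a condensed, illustrative restatement of workqueue_cpu_callback() from the diff further down, not additional code in the patch: locking and trustee-thread creation are omitted and the function name is made up.

static void drive_trustee_sketch(struct global_cwq *gcwq, unsigned long action)
{
	switch (action) {
	case CPU_DOWN_PREPARE:
		/* hand the gcwq over and wait until the trustee owns it */
		gcwq->trustee_state = TRUSTEE_START;
		wake_up_process(gcwq->trustee);
		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
		break;
	case CPU_POST_DEAD:
		/* cpu is gone, no new works will arrive: drain, then butcher idlers */
		gcwq->trustee_state = TRUSTEE_BUTCHER;
		break;
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		/* down cancelled or cpu back online: tell the trustee to step down */
		if (gcwq->trustee_state != TRUSTEE_DONE) {
			gcwq->trustee_state = TRUSTEE_RELEASE;
			wake_up_process(gcwq->trustee);
			wait_trustee_state(gcwq, TRUSTEE_DONE);
		}
		break;
	}
}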
If CPU is brought back up while works are still draining, workqueue_cpu_callback() tells trustee to step down and tell workers to rebind to the cpu. As it's difficult to tell whether cwqs are empty if it's freezing or frozen, trustee doesn't consider draining to be complete while a gcwq is freezing or frozen (tracked by new GCWQ_FREEZING flag). Also, workers which get unbound from their cpu are marked with WORKER_ROGUE. Trustee based implementation doesn't bring any new feature at this point but it will be used to manage worker pool when dynamic shared worker pool is implemented. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 293 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 277 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index d64913a..f57855f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -36,14 +36,27 @@ #include enum { + /* global_cwq flags */ + GCWQ_FREEZING = 1 << 3, /* freeze in progress */ + /* worker flags */ WORKER_STARTED = 1 << 0, /* started */ WORKER_DIE = 1 << 1, /* die die die */ WORKER_IDLE = 1 << 2, /* is idle */ + WORKER_ROGUE = 1 << 4, /* not bound to any cpu */ + + /* gcwq->trustee_state */ + TRUSTEE_START = 0, /* start */ + TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */ + TRUSTEE_BUTCHER = 2, /* butcher workers */ + TRUSTEE_RELEASE = 3, /* release workers */ + TRUSTEE_DONE = 4, /* trustee is done */ BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER, BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1, + + TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ }; /* @@ -83,6 +96,7 @@ struct worker { struct global_cwq { spinlock_t lock; /* the gcwq lock */ unsigned int cpu; /* I: the associated cpu */ + unsigned int flags; /* L: GCWQ_* flags */ int nr_workers; /* L: total number of workers */ int nr_idle; /* L: currently idle ones */ @@ -93,6 +107,10 @@ struct global_cwq { /* L: hash of busy workers */ struct ida worker_ida; /* L: for worker IDs */ + + struct task_struct *trustee; /* L: for gcwq shutdown */ + unsigned int trustee_state; /* L: trustee state */ + wait_queue_head_t trustee_wait; /* trustee wait */ } ____cacheline_aligned_in_smp; /* @@ -148,6 +166,10 @@ struct workqueue_struct { #endif }; +#define for_each_busy_worker(worker, i, pos, gcwq) \ + for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ + hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) + #ifdef CONFIG_DEBUG_OBJECTS_WORK static struct debug_obj_descr work_debug_descr; @@ -546,6 +568,9 @@ static void worker_enter_idle(struct worker *worker) /* idle_list is LIFO */ list_add(&worker->entry, &gcwq->idle_list); + + if (unlikely(worker->flags & WORKER_ROGUE)) + wake_up_all(&gcwq->trustee_wait); } /** @@ -622,8 +647,15 @@ static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) if (IS_ERR(worker->task)) goto fail; + /* + * A rogue worker will become a regular one if CPU comes + * online later on. Make sure every worker has + * PF_THREAD_BOUND set. 
+ */ if (bind) kthread_bind(worker->task, gcwq->cpu); + else + worker->task->flags |= PF_THREAD_BOUND; return worker; fail: @@ -882,10 +914,6 @@ static int worker_thread(void *__worker) struct cpu_workqueue_struct *cwq = worker->cwq; woke_up: - if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, - get_cpu_mask(gcwq->cpu)))) - set_cpus_allowed_ptr(worker->task, get_cpu_mask(gcwq->cpu)); - spin_lock_irq(&gcwq->lock); /* DIE can be set only while we're idle, checking here is enough */ @@ -895,7 +923,7 @@ woke_up: } worker_leave_idle(worker); - +recheck: /* * ->scheduled list can only be filled while a worker is * preparing to process a work or actually processing it. @@ -908,6 +936,22 @@ woke_up: list_first_entry(&cwq->worklist, struct work_struct, entry); + /* + * The following is a rather inefficient way to close + * race window against cpu hotplug operations. Will + * be replaced soon. + */ + if (unlikely(!(worker->flags & WORKER_ROGUE) && + !cpumask_equal(&worker->task->cpus_allowed, + get_cpu_mask(gcwq->cpu)))) { + spin_unlock_irq(&gcwq->lock); + set_cpus_allowed_ptr(worker->task, + get_cpu_mask(gcwq->cpu)); + cpu_relax(); + spin_lock_irq(&gcwq->lock); + goto recheck; + } + if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { /* optimization path, not strictly necessary */ process_one_work(worker, work); @@ -1812,29 +1856,237 @@ void destroy_workqueue(struct workqueue_struct *wq) } EXPORT_SYMBOL_GPL(destroy_workqueue); +/* + * CPU hotplug. + * + * CPU hotplug is implemented by allowing cwqs to be detached from + * CPU, running with unbound workers and allowing them to be + * reattached later if the cpu comes back online. A separate thread + * is created to govern cwqs in such state and is called the trustee. + * + * Trustee states and their descriptions. + * + * START Command state used on startup. On CPU_DOWN_PREPARE, a + * new trustee is started with this state. + * + * IN_CHARGE Once started, trustee will enter this state after + * making all existing workers rogue. DOWN_PREPARE waits + * for trustee to enter this state. After reaching + * IN_CHARGE, trustee tries to execute the pending + * worklist until it's empty and the state is set to + * BUTCHER, or the state is set to RELEASE. + * + * BUTCHER Command state which is set by the cpu callback after + * the cpu has went down. Once this state is set trustee + * knows that there will be no new works on the worklist + * and once the worklist is empty it can proceed to + * killing idle workers. + * + * RELEASE Command state which is set by the cpu callback if the + * cpu down has been canceled or it has come online + * again. After recognizing this state, trustee stops + * trying to drain or butcher and transits to DONE. + * + * DONE Trustee will enter this state after BUTCHER or RELEASE + * is complete. + * + * trustee CPU draining + * took over down complete + * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE + * | | ^ + * | CPU is back online v return workers | + * ----------------> RELEASE -------------- + */ + +/** + * trustee_wait_event_timeout - timed event wait for trustee + * @cond: condition to wait for + * @timeout: timeout in jiffies + * + * wait_event_timeout() for trustee to use. Handles locking and + * checks for RELEASE request. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. To be used by trustee. + * + * RETURNS: + * Positive indicating left time if @cond is satisfied, 0 if timed + * out, -1 if canceled. 
+ */ +#define trustee_wait_event_timeout(cond, timeout) ({ \ + long __ret = (timeout); \ + while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \ + __ret) { \ + spin_unlock_irq(&gcwq->lock); \ + __wait_event_timeout(gcwq->trustee_wait, (cond) || \ + (gcwq->trustee_state == TRUSTEE_RELEASE), \ + __ret); \ + spin_lock_irq(&gcwq->lock); \ + } \ + gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \ +}) + +/** + * trustee_wait_event - event wait for trustee + * @cond: condition to wait for + * + * wait_event() for trustee to use. Automatically handles locking and + * checks for CANCEL request. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. To be used by trustee. + * + * RETURNS: + * 0 if @cond is satisfied, -1 if canceled. + */ +#define trustee_wait_event(cond) ({ \ + long __ret1; \ + __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\ + __ret1 < 0 ? -1 : 0; \ +}) + +static int __cpuinit trustee_thread(void *__gcwq) +{ + struct global_cwq *gcwq = __gcwq; + struct worker *worker; + struct hlist_node *pos; + int i; + + BUG_ON(gcwq->cpu != smp_processor_id()); + + spin_lock_irq(&gcwq->lock); + /* + * Make all multithread workers rogue. Trustee must be bound + * to the target cpu and can't be cancelled. + */ + BUG_ON(gcwq->cpu != smp_processor_id()); + + list_for_each_entry(worker, &gcwq->idle_list, entry) + if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) + worker->flags |= WORKER_ROGUE; + + for_each_busy_worker(worker, i, pos, gcwq) + if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) + worker->flags |= WORKER_ROGUE; + + /* + * We're now in charge. Notify and proceed to drain. We need + * to keep the gcwq running during the whole CPU down + * procedure as other cpu hotunplug callbacks may need to + * flush currently running tasks. + */ + gcwq->trustee_state = TRUSTEE_IN_CHARGE; + wake_up_all(&gcwq->trustee_wait); + + /* + * The original cpu is in the process of dying and may go away + * anytime now. When that happens, we and all workers would + * be migrated to other cpus. Try draining any left work. + * Note that if the gcwq is frozen, there may be frozen works + * in freezeable cwqs. Don't declare completion while frozen. + */ + while (gcwq->nr_workers != gcwq->nr_idle || + gcwq->flags & GCWQ_FREEZING || + gcwq->trustee_state == TRUSTEE_IN_CHARGE) { + /* give a breather */ + if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0) + break; + } + + /* notify completion */ + gcwq->trustee = NULL; + gcwq->trustee_state = TRUSTEE_DONE; + wake_up_all(&gcwq->trustee_wait); + spin_unlock_irq(&gcwq->lock); + return 0; +} + +/** + * wait_trustee_state - wait for trustee to enter the specified state + * @gcwq: gcwq the trustee of interest belongs to + * @state: target state to wait for + * + * Wait for the trustee to reach @state. DONE is already matched. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. To be used by cpu_callback. 
+ */ +static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) +{ + if (!(gcwq->trustee_state == state || + gcwq->trustee_state == TRUSTEE_DONE)) { + spin_unlock_irq(&gcwq->lock); + __wait_event(gcwq->trustee_wait, + gcwq->trustee_state == state || + gcwq->trustee_state == TRUSTEE_DONE); + spin_lock_irq(&gcwq->lock); + } +} + static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct cpu_workqueue_struct *cwq; - struct workqueue_struct *wq; + struct global_cwq *gcwq = get_gcwq(cpu); + struct task_struct *new_trustee = NULL; + struct worker *worker; + struct hlist_node *pos; + unsigned long flags; + int i; action &= ~CPU_TASKS_FROZEN; - list_for_each_entry(wq, &workqueues, list) { - if (wq->flags & WQ_SINGLE_THREAD) - continue; + switch (action) { + case CPU_DOWN_PREPARE: + new_trustee = kthread_create(trustee_thread, gcwq, + "workqueue_trustee/%d\n", cpu); + if (IS_ERR(new_trustee)) + return notifier_from_errno(PTR_ERR(new_trustee)); + kthread_bind(new_trustee, cpu); + } - cwq = get_cwq(cpu, wq); + /* some are called w/ irq disabled, don't disturb irq status */ + spin_lock_irqsave(&gcwq->lock, flags); - switch (action) { - case CPU_POST_DEAD: - flush_workqueue(wq); - break; + switch (action) { + case CPU_DOWN_PREPARE: + /* initialize trustee and tell it to acquire the gcwq */ + BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE); + gcwq->trustee = new_trustee; + gcwq->trustee_state = TRUSTEE_START; + wake_up_process(gcwq->trustee); + wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); + break; + + case CPU_POST_DEAD: + gcwq->trustee_state = TRUSTEE_BUTCHER; + break; + + case CPU_DOWN_FAILED: + case CPU_ONLINE: + if (gcwq->trustee_state != TRUSTEE_DONE) { + gcwq->trustee_state = TRUSTEE_RELEASE; + wake_up_process(gcwq->trustee); + wait_trustee_state(gcwq, TRUSTEE_DONE); } + + /* clear ROGUE from all multithread workers */ + list_for_each_entry(worker, &gcwq->idle_list, entry) + if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) + worker->flags &= ~WORKER_ROGUE; + + for_each_busy_worker(worker, i, pos, gcwq) + if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) + worker->flags &= ~WORKER_ROGUE; + break; } + spin_unlock_irqrestore(&gcwq->lock, flags); + return notifier_from_errno(0); } @@ -1912,6 +2164,9 @@ void freeze_workqueues_begin(void) spin_lock_irq(&gcwq->lock); + BUG_ON(gcwq->flags & GCWQ_FREEZING); + gcwq->flags |= GCWQ_FREEZING; + list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); @@ -1995,6 +2250,9 @@ void thaw_workqueues(void) spin_lock_irq(&gcwq->lock); + BUG_ON(!(gcwq->flags & GCWQ_FREEZING)); + gcwq->flags &= ~GCWQ_FREEZING; + list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); @@ -2026,7 +2284,7 @@ void __init init_workqueues(void) int i; singlethread_cpu = cpumask_first(cpu_possible_mask); - hotcpu_notifier(workqueue_cpu_callback, 0); + hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ for_each_possible_cpu(cpu) { @@ -2040,6 +2298,9 @@ void __init init_workqueues(void) INIT_HLIST_HEAD(&gcwq->busy_hash[i]); ida_init(&gcwq->worker_ida); + + gcwq->trustee_state = TRUSTEE_DONE; + init_waitqueue_head(&gcwq->trustee_wait); } keventd_wq = create_workqueue("events"); -- cgit v1.1 From 502ca9d819792e7d79b6e002afe9094c641fe410 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: make single thread 
workqueue shared worker pool friendly Reimplement st (single thread) workqueue so that it's friendly to shared worker pool. It was originally implemented by confining st workqueues to use cwq of a fixed cpu and always having a worker for the cpu. This implementation isn't very friendly to shared worker pool and suboptimal in that it ends up crossing cpu boundaries often. Reimplement st workqueue using dynamic single cpu binding and cwq->limit. WQ_SINGLE_THREAD is replaced with WQ_SINGLE_CPU. In a single cpu workqueue, at most single cwq is bound to the wq at any given time. Arbitration is done using atomic accesses to wq->single_cpu when queueing a work. Once bound, the binding stays till the workqueue is drained. Note that the binding is never broken while a workqueue is frozen. This is because idle cwqs may have works waiting in delayed_works queue while frozen. On thaw, the cwq is restarted if there are any delayed works or unbound otherwise. When combined with max_active limit of 1, single cpu workqueue has exactly the same execution properties as the original single thread workqueue while allowing sharing of per-cpu workers. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 135 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 100 insertions(+), 35 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f57855f..cfb8aa5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -114,8 +114,7 @@ struct global_cwq { } ____cacheline_aligned_in_smp; /* - * The per-CPU workqueue (if single thread, we always use the first - * possible cpu). The lower WORK_STRUCT_FLAG_BITS of + * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of * work_struct->data are used for flags and thus cwqs need to be * aligned at two's power of the number of flag bits. */ @@ -159,6 +158,8 @@ struct workqueue_struct { struct list_head flusher_queue; /* F: flush waiters */ struct list_head flusher_overflow; /* F: flush overflow list */ + unsigned long single_cpu; /* cpu for single cpu wq */ + int saved_max_active; /* I: saved cwq max_active */ const char *name; /* I: workqueue name */ #ifdef CONFIG_LOCKDEP @@ -289,8 +290,6 @@ static DEFINE_PER_CPU(struct global_cwq, global_cwq); static int worker_thread(void *__worker); -static int singlethread_cpu __read_mostly; - static struct global_cwq *get_gcwq(unsigned int cpu) { return &per_cpu(global_cwq, cpu); @@ -302,14 +301,6 @@ static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, return per_cpu_ptr(wq->cpu_wq, cpu); } -static struct cpu_workqueue_struct *target_cwq(unsigned int cpu, - struct workqueue_struct *wq) -{ - if (unlikely(wq->flags & WQ_SINGLE_THREAD)) - cpu = singlethread_cpu; - return get_cwq(cpu, wq); -} - static unsigned int work_color_to_flags(int color) { return color << WORK_STRUCT_COLOR_SHIFT; @@ -410,17 +401,87 @@ static void insert_work(struct cpu_workqueue_struct *cwq, wake_up_process(cwq->worker->task); } +/** + * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing + * @cwq: cwq to unbind + * + * Try to unbind @cwq from single cpu workqueue processing. If + * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + */ +static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq) +{ + struct workqueue_struct *wq = cwq->wq; + struct global_cwq *gcwq = cwq->gcwq; + + BUG_ON(wq->single_cpu != gcwq->cpu); + /* + * Unbind from workqueue if @cwq is not frozen. 
If frozen, + * thaw_workqueues() will either restart processing on this + * cpu or unbind if empty. This keeps works queued while + * frozen fully ordered and flushable. + */ + if (likely(!(gcwq->flags & GCWQ_FREEZING))) { + smp_wmb(); /* paired with cmpxchg() in __queue_work() */ + wq->single_cpu = NR_CPUS; + } +} + static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { - struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq); - struct global_cwq *gcwq = cwq->gcwq; + struct global_cwq *gcwq; + struct cpu_workqueue_struct *cwq; struct list_head *worklist; unsigned long flags; + bool arbitrate; debug_work_activate(work); - spin_lock_irqsave(&gcwq->lock, flags); + /* determine gcwq to use */ + if (!(wq->flags & WQ_SINGLE_CPU)) { + /* just use the requested cpu for multicpu workqueues */ + gcwq = get_gcwq(cpu); + spin_lock_irqsave(&gcwq->lock, flags); + } else { + unsigned int req_cpu = cpu; + + /* + * It's a bit more complex for single cpu workqueues. + * We first need to determine which cpu is going to be + * used. If no cpu is currently serving this + * workqueue, arbitrate using atomic accesses to + * wq->single_cpu; otherwise, use the current one. + */ + retry: + cpu = wq->single_cpu; + arbitrate = cpu == NR_CPUS; + if (arbitrate) + cpu = req_cpu; + + gcwq = get_gcwq(cpu); + spin_lock_irqsave(&gcwq->lock, flags); + + /* + * The following cmpxchg() is a full barrier paired + * with smp_wmb() in cwq_unbind_single_cpu() and + * guarantees that all changes to wq->st_* fields are + * visible on the new cpu after this point. + */ + if (arbitrate) + cmpxchg(&wq->single_cpu, NR_CPUS, cpu); + + if (unlikely(wq->single_cpu != cpu)) { + spin_unlock_irqrestore(&gcwq->lock, flags); + goto retry; + } + } + + /* gcwq determined, get cwq and queue */ + cwq = get_cwq(gcwq->cpu, wq); + BUG_ON(!list_empty(&work->entry)); cwq->nr_in_flight[cwq->work_color]++; @@ -530,7 +591,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); /* This stores cwq for the moment, for the timer_fn */ - set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0); + set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -790,10 +851,14 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) cwq->nr_in_flight[color]--; cwq->nr_active--; - /* one down, submit a delayed one */ - if (!list_empty(&cwq->delayed_works) && - cwq->nr_active < cwq->max_active) - cwq_activate_first_delayed(cwq); + if (!list_empty(&cwq->delayed_works)) { + /* one down, submit a delayed one */ + if (cwq->nr_active < cwq->max_active) + cwq_activate_first_delayed(cwq); + } else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) { + /* this was the last work, unbind from single cpu */ + cwq_unbind_single_cpu(cwq); + } /* is flush in progress and are we at the flushing tip? 
*/ if (likely(cwq->flush_color != color)) @@ -1727,7 +1792,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, struct lock_class_key *key, const char *lock_name) { - bool singlethread = flags & WQ_SINGLE_THREAD; struct workqueue_struct *wq; bool failed = false; unsigned int cpu; @@ -1748,6 +1812,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, atomic_set(&wq->nr_cwqs_to_flush, 0); INIT_LIST_HEAD(&wq->flusher_queue); INIT_LIST_HEAD(&wq->flusher_overflow); + wq->single_cpu = NR_CPUS; + wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); @@ -1773,8 +1839,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, if (failed) continue; - cwq->worker = create_worker(cwq, - cpu_online(cpu) && !singlethread); + cwq->worker = create_worker(cwq, cpu_online(cpu)); if (cwq->worker) start_worker(cwq->worker); else @@ -1958,18 +2023,16 @@ static int __cpuinit trustee_thread(void *__gcwq) spin_lock_irq(&gcwq->lock); /* - * Make all multithread workers rogue. Trustee must be bound - * to the target cpu and can't be cancelled. + * Make all workers rogue. Trustee must be bound to the + * target cpu and can't be cancelled. */ BUG_ON(gcwq->cpu != smp_processor_id()); list_for_each_entry(worker, &gcwq->idle_list, entry) - if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) - worker->flags |= WORKER_ROGUE; + worker->flags |= WORKER_ROGUE; for_each_busy_worker(worker, i, pos, gcwq) - if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) - worker->flags |= WORKER_ROGUE; + worker->flags |= WORKER_ROGUE; /* * We're now in charge. Notify and proceed to drain. We need @@ -2074,14 +2137,12 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, wait_trustee_state(gcwq, TRUSTEE_DONE); } - /* clear ROGUE from all multithread workers */ + /* clear ROGUE from all workers */ list_for_each_entry(worker, &gcwq->idle_list, entry) - if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) - worker->flags &= ~WORKER_ROGUE; + worker->flags &= ~WORKER_ROGUE; for_each_busy_worker(worker, i, pos, gcwq) - if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) - worker->flags &= ~WORKER_ROGUE; + worker->flags &= ~WORKER_ROGUE; break; } @@ -2266,6 +2327,11 @@ void thaw_workqueues(void) cwq->nr_active < cwq->max_active) cwq_activate_first_delayed(cwq); + /* perform delayed unbind from single cpu if empty */ + if (wq->single_cpu == gcwq->cpu && + !cwq->nr_active && list_empty(&cwq->delayed_works)) + cwq_unbind_single_cpu(cwq); + wake_up_process(cwq->worker->task); } @@ -2283,7 +2349,6 @@ void __init init_workqueues(void) unsigned int cpu; int i; - singlethread_cpu = cpumask_first(cpu_possible_mask); hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ -- cgit v1.1 From 8cca0eea3964b72b14e8c3f88e3a40bef7b9113e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: add find_worker_executing_work() and track current_cwq Now that all the workers are tracked by gcwq, we can find which worker is executing a work from gcwq. Implement find_worker_executing_work() and make worker track its current_cwq so that we can find things the other way around. This will be used to implement non-reentrant wqs. 
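As a usage sketch, the test that later patches build on top of this helper ("is @work still executing on this gcwq, and on behalf of @wq?") reduces to a single busy-hash lookup. The helper name below is illustrative, not part of the patch, and the caller is assumed to hold gcwq->lock.

static bool work_running_here(struct global_cwq *gcwq,
			      struct workqueue_struct *wq,
			      struct work_struct *work)
{
	struct worker *worker = find_worker_executing_work(gcwq, work);

	/* current_cwq identifies which workqueue the running work belongs to */
	return worker && worker->current_cwq->wq == wq;
}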
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cfb8aa5..c276dec 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -82,6 +82,7 @@ struct worker { }; struct work_struct *current_work; /* L: work being processed */ + struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */ struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ struct global_cwq *gcwq; /* I: the associated gcwq */ @@ -373,6 +374,59 @@ static struct hlist_head *busy_worker_head(struct global_cwq *gcwq, } /** + * __find_worker_executing_work - find worker which is executing a work + * @gcwq: gcwq of interest + * @bwh: hash head as returned by busy_worker_head() + * @work: work to find worker for + * + * Find a worker which is executing @work on @gcwq. @bwh should be + * the hash head obtained by calling busy_worker_head() with the same + * work. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + * + * RETURNS: + * Pointer to worker which is executing @work if found, NULL + * otherwise. + */ +static struct worker *__find_worker_executing_work(struct global_cwq *gcwq, + struct hlist_head *bwh, + struct work_struct *work) +{ + struct worker *worker; + struct hlist_node *tmp; + + hlist_for_each_entry(worker, tmp, bwh, hentry) + if (worker->current_work == work) + return worker; + return NULL; +} + +/** + * find_worker_executing_work - find worker which is executing a work + * @gcwq: gcwq of interest + * @work: work to find worker for + * + * Find a worker which is executing @work on @gcwq. This function is + * identical to __find_worker_executing_work() except that this + * function calculates @bwh itself. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + * + * RETURNS: + * Pointer to worker which is executing @work if found, NULL + * otherwise. + */ +static struct worker *find_worker_executing_work(struct global_cwq *gcwq, + struct work_struct *work) +{ + return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work), + work); +} + +/** * insert_work - insert a work into cwq * @cwq: cwq @work belongs to * @work: work to insert @@ -914,6 +968,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) debug_work_deactivate(work); hlist_add_head(&worker->hentry, bwh); worker->current_work = work; + worker->current_cwq = cwq; work_color = get_work_color(work); list_del_init(&work->entry); @@ -942,6 +997,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) /* we're done with it, release */ hlist_del_init(&worker->hentry); worker->current_work = NULL; + worker->current_cwq = NULL; cwq_dec_nr_in_flight(cwq, work_color); } -- cgit v1.1 From 7a22ad757ec75186ad43a5b4670fa7423ee8f480 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: carry cpu number in work data once execution starts To implement non-reentrant workqueue, the last gcwq a work was executed on must be reliably obtainable as long as the work structure is valid even if the previous workqueue has been destroyed. To achieve this, work->data will be overloaded to carry the last cpu number once execution starts so that the previous gcwq can be located reliably. This means that cwq can't be obtained from work after execution starts but only gcwq. 
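In code, the decode rule amounts to the sketch below. It mirrors get_work_gcwq() from the diff that follows; the function name here is illustrative. The two interpretations of work->data never overlap because kernel pointers are always >= PAGE_OFFSET while "cpu << WORK_STRUCT_FLAG_BITS" stays below it (enforced by the BUILD_BUG_ON added to init_workqueues()).

static struct global_cwq *decode_work_data(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data) &
			     WORK_STRUCT_WQ_DATA_MASK;

	if (data >= PAGE_OFFSET)		/* still queued: cwq pointer */
		return ((struct cpu_workqueue_struct *)data)->gcwq;

	if ((data >> WORK_STRUCT_FLAG_BITS) == NR_CPUS)
		return NULL;			/* never ran anywhere yet */

	/* otherwise data carries the cpu the work last executed on */
	return get_gcwq(data >> WORK_STRUCT_FLAG_BITS);
}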
Implement set_work_{cwq|cpu}(), get_work_[g]cwq() and clear_work_data() to set work data to the cpu number when starting execution, access the overloaded work data and clear it after cancellation. queue_delayed_work_on() is updated to preserve the last cpu while in-flight in timer and other callers which depended on getting cwq from work after execution starts are converted to depend on gcwq instead. * Anton Blanchard fixed compile error on powerpc due to missing linux/threads.h include. Signed-off-by: Tejun Heo Cc: Anton Blanchard --- kernel/workqueue.c | 163 ++++++++++++++++++++++++++++++++++------------------- 1 file changed, 104 insertions(+), 59 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c276dec..c68277c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -319,31 +319,71 @@ static int work_next_color(int color) } /* - * Set the workqueue on which a work item is to be run - * - Must *only* be called if the pending flag is set + * Work data points to the cwq while a work is on queue. Once + * execution starts, it points to the cpu the work was last on. This + * can be distinguished by comparing the data value against + * PAGE_OFFSET. + * + * set_work_{cwq|cpu}() and clear_work_data() can be used to set the + * cwq, cpu or clear work->data. These functions should only be + * called while the work is owned - ie. while the PENDING bit is set. + * + * get_work_[g]cwq() can be used to obtain the gcwq or cwq + * corresponding to a work. gcwq is available once the work has been + * queued anywhere after initialization. cwq is available only from + * queueing until execution starts. */ -static inline void set_wq_data(struct work_struct *work, - struct cpu_workqueue_struct *cwq, - unsigned long extra_flags) +static inline void set_work_data(struct work_struct *work, unsigned long data, + unsigned long flags) { BUG_ON(!work_pending(work)); + atomic_long_set(&work->data, data | flags | work_static(work)); +} - atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) | - WORK_STRUCT_PENDING | extra_flags); +static void set_work_cwq(struct work_struct *work, + struct cpu_workqueue_struct *cwq, + unsigned long extra_flags) +{ + set_work_data(work, (unsigned long)cwq, + WORK_STRUCT_PENDING | extra_flags); } -/* - * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued. - */ -static inline void clear_wq_data(struct work_struct *work) +static void set_work_cpu(struct work_struct *work, unsigned int cpu) +{ + set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING); +} + +static void clear_work_data(struct work_struct *work) +{ + set_work_data(work, WORK_STRUCT_NO_CPU, 0); +} + +static inline unsigned long get_work_data(struct work_struct *work) +{ + return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK; +} + +static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work) { - atomic_long_set(&work->data, work_static(work)); + unsigned long data = get_work_data(work); + + return data >= PAGE_OFFSET ? 
(void *)data : NULL; } -static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) +static struct global_cwq *get_work_gcwq(struct work_struct *work) { - return (void *)(atomic_long_read(&work->data) & - WORK_STRUCT_WQ_DATA_MASK); + unsigned long data = get_work_data(work); + unsigned int cpu; + + if (data >= PAGE_OFFSET) + return ((struct cpu_workqueue_struct *)data)->gcwq; + + cpu = data >> WORK_STRUCT_FLAG_BITS; + if (cpu == NR_CPUS) + return NULL; + + BUG_ON(cpu >= num_possible_cpus()); + return get_gcwq(cpu); } /** @@ -443,7 +483,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, unsigned int extra_flags) { /* we own @work, set data and link */ - set_wq_data(work, cwq, extra_flags); + set_work_cwq(work, cwq, extra_flags); /* * Ensure that we get the right work->data if we see the @@ -599,7 +639,7 @@ EXPORT_SYMBOL_GPL(queue_work_on); static void delayed_work_timer_fn(unsigned long __data) { struct delayed_work *dwork = (struct delayed_work *)__data; - struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work); + struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work); __queue_work(smp_processor_id(), cwq->wq, &dwork->work); } @@ -639,13 +679,19 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work = &dwork->work; if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + struct global_cwq *gcwq = get_work_gcwq(work); + unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id(); + BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); timer_stats_timer_set_start_info(&dwork->timer); - - /* This stores cwq for the moment, for the timer_fn */ - set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); + /* + * This stores cwq for the moment, for the timer_fn. + * Note that the work's gcwq is preserved to allow + * reentrance detection for delayed works. + */ + set_work_cwq(work, get_cwq(lcpu, wq), 0); timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -970,11 +1016,14 @@ static void process_one_work(struct worker *worker, struct work_struct *work) worker->current_work = work; worker->current_cwq = cwq; work_color = get_work_color(work); + + BUG_ON(get_work_cwq(work) != cwq); + /* record the current cpu number in the work data and dequeue */ + set_work_cpu(work, gcwq->cpu); list_del_init(&work->entry); spin_unlock_irq(&gcwq->lock); - BUG_ON(get_wq_data(work) != cwq); work_clear_pending(work); lock_map_acquire(&cwq->wq->lockdep_map); lock_map_acquire(&lockdep_map); @@ -1406,37 +1455,39 @@ EXPORT_SYMBOL_GPL(flush_workqueue); int flush_work(struct work_struct *work) { struct worker *worker = NULL; - struct cpu_workqueue_struct *cwq; struct global_cwq *gcwq; + struct cpu_workqueue_struct *cwq; struct wq_barrier barr; might_sleep(); - cwq = get_wq_data(work); - if (!cwq) + gcwq = get_work_gcwq(work); + if (!gcwq) return 0; - gcwq = cwq->gcwq; - - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); spin_lock_irq(&gcwq->lock); if (!list_empty(&work->entry)) { /* * See the comment near try_to_grab_pending()->smp_rmb(). - * If it was re-queued under us we are not going to wait. + * If it was re-queued to a different gcwq under us, we + * are not going to wait. 
*/ smp_rmb(); - if (unlikely(cwq != get_wq_data(work))) + cwq = get_work_cwq(work); + if (unlikely(!cwq || gcwq != cwq->gcwq)) goto already_gone; } else { - if (cwq->worker && cwq->worker->current_work == work) - worker = cwq->worker; + worker = find_worker_executing_work(gcwq, work); if (!worker) goto already_gone; + cwq = worker->current_cwq; } insert_wq_barrier(cwq, &barr, work, worker); spin_unlock_irq(&gcwq->lock); + + lock_map_acquire(&cwq->wq->lockdep_map); + lock_map_release(&cwq->wq->lockdep_map); + wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); return 1; @@ -1453,7 +1504,6 @@ EXPORT_SYMBOL_GPL(flush_work); static int try_to_grab_pending(struct work_struct *work) { struct global_cwq *gcwq; - struct cpu_workqueue_struct *cwq; int ret = -1; if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) @@ -1463,24 +1513,23 @@ static int try_to_grab_pending(struct work_struct *work) * The queueing is in progress, or it is already queued. Try to * steal it from ->worklist without clearing WORK_STRUCT_PENDING. */ - - cwq = get_wq_data(work); - if (!cwq) + gcwq = get_work_gcwq(work); + if (!gcwq) return ret; - gcwq = cwq->gcwq; spin_lock_irq(&gcwq->lock); if (!list_empty(&work->entry)) { /* - * This work is queued, but perhaps we locked the wrong cwq. + * This work is queued, but perhaps we locked the wrong gcwq. * In that case we must see the new value after rmb(), see * insert_work()->wmb(). */ smp_rmb(); - if (cwq == get_wq_data(work)) { + if (gcwq == get_work_gcwq(work)) { debug_work_deactivate(work); list_del_init(&work->entry); - cwq_dec_nr_in_flight(cwq, get_work_color(work)); + cwq_dec_nr_in_flight(get_work_cwq(work), + get_work_color(work)); ret = 1; } } @@ -1489,20 +1538,16 @@ static int try_to_grab_pending(struct work_struct *work) return ret; } -static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, - struct work_struct *work) +static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) { - struct global_cwq *gcwq = cwq->gcwq; struct wq_barrier barr; struct worker *worker; spin_lock_irq(&gcwq->lock); - worker = NULL; - if (unlikely(cwq->worker && cwq->worker->current_work == work)) { - worker = cwq->worker; - insert_wq_barrier(cwq, &barr, work, worker); - } + worker = find_worker_executing_work(gcwq, work); + if (unlikely(worker)) + insert_wq_barrier(worker->current_cwq, &barr, work, worker); spin_unlock_irq(&gcwq->lock); @@ -1514,8 +1559,6 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, static void wait_on_work(struct work_struct *work) { - struct cpu_workqueue_struct *cwq; - struct workqueue_struct *wq; int cpu; might_sleep(); @@ -1523,14 +1566,8 @@ static void wait_on_work(struct work_struct *work) lock_map_acquire(&work->lockdep_map); lock_map_release(&work->lockdep_map); - cwq = get_wq_data(work); - if (!cwq) - return; - - wq = cwq->wq; - for_each_possible_cpu(cpu) - wait_on_cpu_work(get_cwq(cpu, wq), work); + wait_on_cpu_work(get_gcwq(cpu), work); } static int __cancel_work_timer(struct work_struct *work, @@ -1545,7 +1582,7 @@ static int __cancel_work_timer(struct work_struct *work, wait_on_work(work); } while (unlikely(ret < 0)); - clear_wq_data(work); + clear_work_data(work); return ret; } @@ -1647,7 +1684,7 @@ EXPORT_SYMBOL(schedule_delayed_work); void flush_delayed_work(struct delayed_work *dwork) { if (del_timer_sync(&dwork->timer)) { - __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq, + __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq, &dwork->work); put_cpu(); } @@ -2405,6 
+2442,14 @@ void __init init_workqueues(void) unsigned int cpu; int i; + /* + * The pointer part of work->data is either pointing to the + * cwq or contains the cpu number the work ran last on. Make + * sure cpu number won't overflow into kernel pointer area so + * that they can be distinguished. + */ + BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET); + hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ -- cgit v1.1 From 18aa9effad4adb2c1efe123af4eb24fec9f59b30 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: implement WQ_NON_REENTRANT With gcwq managing all the workers and work->data pointing to the last gcwq it was on, non-reentrance can be easily implemented by checking whether the work is still running on the previous gcwq on queueing. Implement it. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c68277c..bce1074 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -534,11 +534,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, debug_work_activate(work); - /* determine gcwq to use */ + /* + * Determine gcwq to use. SINGLE_CPU is inherently + * NON_REENTRANT, so test it first. + */ if (!(wq->flags & WQ_SINGLE_CPU)) { - /* just use the requested cpu for multicpu workqueues */ + struct global_cwq *last_gcwq; + + /* + * It's multi cpu. If @wq is non-reentrant and @work + * was previously on a different cpu, it might still + * be running there, in which case the work needs to + * be queued on that cpu to guarantee non-reentrance. + */ gcwq = get_gcwq(cpu); - spin_lock_irqsave(&gcwq->lock, flags); + if (wq->flags & WQ_NON_REENTRANT && + (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) { + struct worker *worker; + + spin_lock_irqsave(&last_gcwq->lock, flags); + + worker = find_worker_executing_work(last_gcwq, work); + + if (worker && worker->current_cwq->wq == wq) + gcwq = last_gcwq; + else { + /* meh... not running there, queue here */ + spin_unlock_irqrestore(&last_gcwq->lock, flags); + spin_lock_irqsave(&gcwq->lock, flags); + } + } else + spin_lock_irqsave(&gcwq->lock, flags); } else { unsigned int req_cpu = cpu; -- cgit v1.1 From 7e11629d0efec829cbf62366143ba1081944a70e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: use shared worklist and pool all workers per cpu Use gcwq->worklist instead of cwq->worklist and break the strict association between a cwq and its worker. All works queued on a cpu are queued on gcwq->worklist and processed by any available worker on the gcwq. As there no longer is strict association between a cwq and its worker, whether a work is executing can now only be determined by calling [__]find_worker_executing_work(). After this change, the only association between a cwq and its worker is that a cwq puts a worker into shared worker pool on creation and kills it on destruction. As all workqueues are still limited to max_active of one, this means that there are always at least as many workers as active works and thus there's no danger for deadlock. The break of strong association between cwqs and workers requires somewhat clumsy changes to current_is_keventd() and destroy_workqueue(). Dynamic worker pool management will remove both clumsy changes. 
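The queueing side of this change is small: instead of waking the cwq's dedicated worker, insert_work() now wakes whichever worker sits at the head of the gcwq's idle list (the list is kept LIFO, so the most recently idled worker runs first). A sketch mirroring first_worker()/wake_up_worker() from the diff below, with an illustrative name; gcwq->lock must be held.

static void wake_one_idle_worker(struct global_cwq *gcwq)
{
	if (!list_empty(&gcwq->idle_list))
		wake_up_process(list_first_entry(&gcwq->idle_list,
						 struct worker, entry)->task);
}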
current_is_keventd() won't be necessary at all as the only reason it exists is to avoid queueing a work from a work which will be allowed just fine. The clumsy part of destroy_workqueue() is added because a worker can only be destroyed while idle and there's no guarantee a worker is idle when its wq is going down. With dynamic pool management, workers are not associated with workqueues at all and only idle ones will be submitted to destroy_workqueue() so the code won't be necessary anymore. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 131 ++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 99 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index bce1074..e697d6c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -34,6 +34,7 @@ #include #include #include +#include enum { /* global_cwq flags */ @@ -72,7 +73,6 @@ enum { */ struct global_cwq; -struct cpu_workqueue_struct; struct worker { /* on idle list while idle, on busy hash table while busy */ @@ -86,7 +86,6 @@ struct worker { struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ struct global_cwq *gcwq; /* I: the associated gcwq */ - struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ unsigned int flags; /* L: flags */ int id; /* I: worker id */ }; @@ -96,6 +95,7 @@ struct worker { */ struct global_cwq { spinlock_t lock; /* the gcwq lock */ + struct list_head worklist; /* L: list of pending works */ unsigned int cpu; /* I: the associated cpu */ unsigned int flags; /* L: GCWQ_* flags */ @@ -121,7 +121,6 @@ struct global_cwq { */ struct cpu_workqueue_struct { struct global_cwq *gcwq; /* I: the associated gcwq */ - struct list_head worklist; struct worker *worker; struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ @@ -386,6 +385,32 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) return get_gcwq(cpu); } +/* Return the first worker. Safe with preemption disabled */ +static struct worker *first_worker(struct global_cwq *gcwq) +{ + if (unlikely(list_empty(&gcwq->idle_list))) + return NULL; + + return list_first_entry(&gcwq->idle_list, struct worker, entry); +} + +/** + * wake_up_worker - wake up an idle worker + * @gcwq: gcwq to wake worker for + * + * Wake up the first idle worker of @gcwq. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + */ +static void wake_up_worker(struct global_cwq *gcwq) +{ + struct worker *worker = first_worker(gcwq); + + if (likely(worker)) + wake_up_process(worker->task); +} + /** * busy_worker_head - return the busy hash head for a work * @gcwq: gcwq of interest @@ -467,13 +492,14 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq, } /** - * insert_work - insert a work into cwq + * insert_work - insert a work into gcwq * @cwq: cwq @work belongs to * @work: work to insert * @head: insertion point * @extra_flags: extra WORK_STRUCT_* flags to set * - * Insert @work into @cwq after @head. + * Insert @work which belongs to @cwq into @gcwq after @head. + * @extra_flags is or'd to work_struct flags. * * CONTEXT: * spin_lock_irq(gcwq->lock). 
@@ -492,7 +518,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, smp_wmb(); list_add_tail(&work->entry, head); - wake_up_process(cwq->worker->task); + wake_up_worker(cwq->gcwq); } /** @@ -608,7 +634,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, if (likely(cwq->nr_active < cwq->max_active)) { cwq->nr_active++; - worklist = &cwq->worklist; + worklist = &gcwq->worklist; } else worklist = &cwq->delayed_works; @@ -793,10 +819,10 @@ static struct worker *alloc_worker(void) /** * create_worker - create a new workqueue worker - * @cwq: cwq the new worker will belong to + * @gcwq: gcwq the new worker will belong to * @bind: whether to set affinity to @cpu or not * - * Create a new worker which is bound to @cwq. The returned worker + * Create a new worker which is bound to @gcwq. The returned worker * can be started by calling start_worker() or destroyed using * destroy_worker(). * @@ -806,9 +832,8 @@ static struct worker *alloc_worker(void) * RETURNS: * Pointer to the newly created worker. */ -static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) +static struct worker *create_worker(struct global_cwq *gcwq, bool bind) { - struct global_cwq *gcwq = cwq->gcwq; int id = -1; struct worker *worker = NULL; @@ -826,7 +851,6 @@ static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) goto fail; worker->gcwq = gcwq; - worker->cwq = cwq; worker->id = id; worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d", @@ -953,7 +977,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) struct work_struct *work = list_first_entry(&cwq->delayed_works, struct work_struct, entry); - move_linked_works(work, &cwq->worklist, NULL); + move_linked_works(work, &cwq->gcwq->worklist, NULL); cwq->nr_active++; } @@ -1021,11 +1045,12 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) */ static void process_one_work(struct worker *worker, struct work_struct *work) { - struct cpu_workqueue_struct *cwq = worker->cwq; + struct cpu_workqueue_struct *cwq = get_work_cwq(work); struct global_cwq *gcwq = cwq->gcwq; struct hlist_head *bwh = busy_worker_head(gcwq, work); work_func_t f = work->func; int work_color; + struct worker *collision; #ifdef CONFIG_LOCKDEP /* * It is permissible to free the struct work_struct from @@ -1036,6 +1061,18 @@ static void process_one_work(struct worker *worker, struct work_struct *work) */ struct lockdep_map lockdep_map = work->lockdep_map; #endif + /* + * A single work shouldn't be executed concurrently by + * multiple workers on a single cpu. Check whether anyone is + * already processing the work. If so, defer the work to the + * currently executing one. 
+ */ + collision = __find_worker_executing_work(gcwq, bwh, work); + if (unlikely(collision)) { + move_linked_works(work, &collision->scheduled, NULL); + return; + } + /* claim and process */ debug_work_deactivate(work); hlist_add_head(&worker->hentry, bwh); @@ -1043,7 +1080,6 @@ static void process_one_work(struct worker *worker, struct work_struct *work) worker->current_cwq = cwq; work_color = get_work_color(work); - BUG_ON(get_work_cwq(work) != cwq); /* record the current cpu number in the work data and dequeue */ set_work_cpu(work, gcwq->cpu); list_del_init(&work->entry); @@ -1107,7 +1143,6 @@ static int worker_thread(void *__worker) { struct worker *worker = __worker; struct global_cwq *gcwq = worker->gcwq; - struct cpu_workqueue_struct *cwq = worker->cwq; woke_up: spin_lock_irq(&gcwq->lock); @@ -1127,9 +1162,9 @@ recheck: */ BUG_ON(!list_empty(&worker->scheduled)); - while (!list_empty(&cwq->worklist)) { + while (!list_empty(&gcwq->worklist)) { struct work_struct *work = - list_first_entry(&cwq->worklist, + list_first_entry(&gcwq->worklist, struct work_struct, entry); /* @@ -1844,18 +1879,37 @@ int keventd_up(void) int current_is_keventd(void) { - struct cpu_workqueue_struct *cwq; - int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */ - int ret = 0; + bool found = false; + unsigned int cpu; - BUG_ON(!keventd_wq); + /* + * There no longer is one-to-one relation between worker and + * work queue and a worker task might be unbound from its cpu + * if the cpu was offlined. Match all busy workers. This + * function will go away once dynamic pool is implemented. + */ + for_each_possible_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + struct worker *worker; + struct hlist_node *pos; + unsigned long flags; + int i; - cwq = get_cwq(cpu, keventd_wq); - if (current == cwq->worker->task) - ret = 1; + spin_lock_irqsave(&gcwq->lock, flags); - return ret; + for_each_busy_worker(worker, i, pos, gcwq) { + if (worker->task == current) { + found = true; + break; + } + } + + spin_unlock_irqrestore(&gcwq->lock, flags); + if (found) + break; + } + return found; } static struct cpu_workqueue_struct *alloc_cwqs(void) @@ -1953,12 +2007,11 @@ struct workqueue_struct *__create_workqueue_key(const char *name, cwq->wq = wq; cwq->flush_color = -1; cwq->max_active = max_active; - INIT_LIST_HEAD(&cwq->worklist); INIT_LIST_HEAD(&cwq->delayed_works); if (failed) continue; - cwq->worker = create_worker(cwq, cpu_online(cpu)); + cwq->worker = create_worker(gcwq, cpu_online(cpu)); if (cwq->worker) start_worker(cwq->worker); else @@ -2020,13 +2073,26 @@ void destroy_workqueue(struct workqueue_struct *wq) for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct global_cwq *gcwq = cwq->gcwq; int i; if (cwq->worker) { - spin_lock_irq(&cwq->gcwq->lock); + retry: + spin_lock_irq(&gcwq->lock); + /* + * Worker can only be destroyed while idle. + * Wait till it becomes idle. This is ugly + * and prone to starvation. It will go away + * once dynamic worker pool is implemented. + */ + if (!(cwq->worker->flags & WORKER_IDLE)) { + spin_unlock_irq(&gcwq->lock); + msleep(100); + goto retry; + } destroy_worker(cwq->worker); cwq->worker = NULL; - spin_unlock_irq(&cwq->gcwq->lock); + spin_unlock_irq(&gcwq->lock); } for (i = 0; i < WORK_NR_COLORS; i++) @@ -2324,7 +2390,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu); * * Start freezing workqueues. After this function returns, all * freezeable workqueues will queue new works to their frozen_works - * list instead of the cwq ones. 
+ * list instead of gcwq->worklist. * * CONTEXT: * Grabs and releases workqueue_lock and gcwq->lock's. @@ -2410,7 +2476,7 @@ out_unlock: * thaw_workqueues - thaw workqueues * * Thaw workqueues. Normal queueing is restored and all collected - * frozen works are transferred to their respective cwq worklists. + * frozen works are transferred to their respective gcwq worklists. * * CONTEXT: * Grabs and releases workqueue_lock and gcwq->lock's. @@ -2483,6 +2549,7 @@ void __init init_workqueues(void) struct global_cwq *gcwq = get_gcwq(cpu); spin_lock_init(&gcwq->lock); + INIT_LIST_HEAD(&gcwq->worklist); gcwq->cpu = cpu; INIT_LIST_HEAD(&gcwq->idle_list); -- cgit v1.1 From d302f0178223802a1e496ba90c66193b7721c9c1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: implement worker_{set|clr}_flags() Implement worker_{set|clr}_flags() to manipulate worker flags. These are currently simple wrappers but logics to track the current worker state and the current level of concurrency will be added. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 48 ++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 40 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e697d6c..4c31fde 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -412,6 +412,38 @@ static void wake_up_worker(struct global_cwq *gcwq) } /** + * worker_set_flags - set worker flags + * @worker: worker to set flags for + * @flags: flags to set + * @wakeup: wakeup an idle worker if necessary + * + * Set @flags in @worker->flags. + * + * LOCKING: + * spin_lock_irq(gcwq->lock). + */ +static inline void worker_set_flags(struct worker *worker, unsigned int flags, + bool wakeup) +{ + worker->flags |= flags; +} + +/** + * worker_clr_flags - clear worker flags + * @worker: worker to set flags for + * @flags: flags to clear + * + * Clear @flags in @worker->flags. + * + * LOCKING: + * spin_lock_irq(gcwq->lock). 
+ */ +static inline void worker_clr_flags(struct worker *worker, unsigned int flags) +{ + worker->flags &= ~flags; +} + +/** * busy_worker_head - return the busy hash head for a work * @gcwq: gcwq of interest * @work: work to be hashed @@ -776,7 +808,7 @@ static void worker_enter_idle(struct worker *worker) BUG_ON(!list_empty(&worker->entry) && (worker->hentry.next || worker->hentry.pprev)); - worker->flags |= WORKER_IDLE; + worker_set_flags(worker, WORKER_IDLE, false); gcwq->nr_idle++; /* idle_list is LIFO */ @@ -800,7 +832,7 @@ static void worker_leave_idle(struct worker *worker) struct global_cwq *gcwq = worker->gcwq; BUG_ON(!(worker->flags & WORKER_IDLE)); - worker->flags &= ~WORKER_IDLE; + worker_clr_flags(worker, WORKER_IDLE); gcwq->nr_idle--; list_del_init(&worker->entry); } @@ -890,7 +922,7 @@ fail: */ static void start_worker(struct worker *worker) { - worker->flags |= WORKER_STARTED; + worker_set_flags(worker, WORKER_STARTED, false); worker->gcwq->nr_workers++; worker_enter_idle(worker); wake_up_process(worker->task); @@ -920,7 +952,7 @@ static void destroy_worker(struct worker *worker) gcwq->nr_idle--; list_del_init(&worker->entry); - worker->flags |= WORKER_DIE; + worker_set_flags(worker, WORKER_DIE, false); spin_unlock_irq(&gcwq->lock); @@ -2214,10 +2246,10 @@ static int __cpuinit trustee_thread(void *__gcwq) BUG_ON(gcwq->cpu != smp_processor_id()); list_for_each_entry(worker, &gcwq->idle_list, entry) - worker->flags |= WORKER_ROGUE; + worker_set_flags(worker, WORKER_ROGUE, false); for_each_busy_worker(worker, i, pos, gcwq) - worker->flags |= WORKER_ROGUE; + worker_set_flags(worker, WORKER_ROGUE, false); /* * We're now in charge. Notify and proceed to drain. We need @@ -2324,10 +2356,10 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, /* clear ROGUE from all workers */ list_for_each_entry(worker, &gcwq->idle_list, entry) - worker->flags &= ~WORKER_ROGUE; + worker_clr_flags(worker, WORKER_ROGUE); for_each_busy_worker(worker, i, pos, gcwq) - worker->flags &= ~WORKER_ROGUE; + worker_clr_flags(worker, WORKER_ROGUE); break; } -- cgit v1.1 From e22bee782b3b00bd4534ae9b1c5fb2e8e6573c5c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:14 +0200 Subject: workqueue: implement concurrency managed dynamic worker pool Instead of creating a worker for each cwq and putting it into the shared pool, manage per-cpu workers dynamically. Works aren't supposed to be cpu cycle hogs and maintaining just enough concurrency to prevent work processing from stalling due to lack of processing context is optimal. gcwq keeps the number of concurrent active workers to minimum but no less. As long as there's one or more running workers on the cpu, no new worker is scheduled so that works can be processed in batch as much as possible but when the last running worker blocks, gcwq immediately schedules new worker so that the cpu doesn't sit idle while there are works to be processed. gcwq always keeps at least single idle worker around. When a new worker is necessary and the worker is the last idle one, the worker assumes the role of "manager" and manages the worker pool - ie. creates another worker. Forward-progress is guaranteed by having dedicated rescue workers for workqueues which may be necessary while creating a new worker. When the manager is having problem creating a new worker, mayday timer activates and rescue workers are summoned to the cpu and execute works which might be necessary to create new workers. 
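In other words, the core of the policy boils down to two tests against the per-cpu count of running workers and the shared worklist. The following is only a simplified illustration of that idea, not code from the patch: gcwq_sketch stands in for the real global_cwq plus its per-cpu nr_running counter, and the actual helpers (need_more_worker(), keep_working() and friends) appear in the diff below.

    #include <linux/types.h>
    #include <linux/list.h>
    #include <linux/atomic.h>

    /* illustrative stand-in for global_cwq + its per-cpu nr_running counter */
    struct gcwq_sketch {
            atomic_t                nr_running;     /* workers currently running */
            struct list_head        worklist;       /* pending works, all wqs mixed */
    };

    /* wake another worker only when nothing is running and work is pending */
    static bool need_more_worker(struct gcwq_sketch *gcwq)
    {
            return !list_empty(&gcwq->worklist) && !atomic_read(&gcwq->nr_running);
    }

    /* a running worker keeps going only while it is effectively the last runner */
    static bool keep_working(struct gcwq_sketch *gcwq)
    {
            return !list_empty(&gcwq->worklist) && atomic_read(&gcwq->nr_running) <= 1;
    }

When the last running worker blocks, the scheduler hook drops nr_running; if the worklist is still non-empty, an idle worker is woken immediately so the cpu never sits idle on pending work.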
Trustee is expanded to serve the role of manager while a CPU is being taken down and stays down. As no new works are supposed to be queued on a dead cpu, it just needs to drain all the existing ones. Trustee continues to try to create new workers and summon rescuers as long as there are pending works. If the CPU is brought back up while the trustee is still trying to drain the gcwq from the previous offlining, the trustee will kill all idles ones and tell workers which are still busy to rebind to the cpu, and pass control over to gcwq which assumes the manager role as necessary. Concurrency managed worker pool reduces the number of workers drastically. Only workers which are necessary to keep the processing going are created and kept. Also, it reduces cache footprint by avoiding unnecessarily switching contexts between different workers. Please note that this patch does not increase max_active of any workqueue. All workqueues can still only process one work per cpu. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 936 +++++++++++++++++++++++++++++++++++++++++------ kernel/workqueue_sched.h | 13 +- 2 files changed, 836 insertions(+), 113 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4c31fde..0ad4652 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -34,17 +34,25 @@ #include #include #include -#include + +#include "workqueue_sched.h" enum { /* global_cwq flags */ + GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ + GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */ + GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ GCWQ_FREEZING = 1 << 3, /* freeze in progress */ /* worker flags */ WORKER_STARTED = 1 << 0, /* started */ WORKER_DIE = 1 << 1, /* die die die */ WORKER_IDLE = 1 << 2, /* is idle */ + WORKER_PREP = 1 << 3, /* preparing to run works */ WORKER_ROGUE = 1 << 4, /* not bound to any cpu */ + WORKER_REBIND = 1 << 5, /* mom is home, come back */ + + WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND, /* gcwq->trustee_state */ TRUSTEE_START = 0, /* start */ @@ -57,7 +65,19 @@ enum { BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER, BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1, + MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ + IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ + + MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */ + MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ + CREATE_COOLDOWN = HZ, /* time to breath after fail */ TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ + + /* + * Rescue workers are used only on emergencies and shared by + * all cpus. Give -20. + */ + RESCUER_NICE_LEVEL = -20, }; /* @@ -65,8 +85,16 @@ enum { * * I: Set during initialization and read-only afterwards. * + * P: Preemption protected. Disabling preemption is enough and should + * only be modified and accessed from the local cpu. + * * L: gcwq->lock protected. Access with gcwq->lock held. * + * X: During normal operation, modification requires gcwq->lock and + * should be done only from local cpu. Either disabling preemption + * on local cpu or grabbing gcwq->lock is enough for read access. + * While trustee is in charge, it's identical to L. + * * F: wq->flush_mutex protected. * * W: workqueue_lock protected. @@ -74,6 +102,10 @@ enum { struct global_cwq; +/* + * The poor guys doing the actual heavy lifting. All on-duty workers + * are either serving the manager role, on idle list or on busy hash. 
+ */ struct worker { /* on idle list while idle, on busy hash table while busy */ union { @@ -86,12 +118,17 @@ struct worker { struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ struct global_cwq *gcwq; /* I: the associated gcwq */ - unsigned int flags; /* L: flags */ + /* 64 bytes boundary on 64bit, 32 on 32bit */ + unsigned long last_active; /* L: last active timestamp */ + unsigned int flags; /* X: flags */ int id; /* I: worker id */ + struct work_struct rebind_work; /* L: rebind worker to cpu */ }; /* - * Global per-cpu workqueue. + * Global per-cpu workqueue. There's one and only one for each cpu + * and all works are queued and processed here regardless of their + * target workqueues. */ struct global_cwq { spinlock_t lock; /* the gcwq lock */ @@ -103,15 +140,19 @@ struct global_cwq { int nr_idle; /* L: currently idle ones */ /* workers are chained either in the idle_list or busy_hash */ - struct list_head idle_list; /* L: list of idle workers */ + struct list_head idle_list; /* X: list of idle workers */ struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE]; /* L: hash of busy workers */ + struct timer_list idle_timer; /* L: worker idle timeout */ + struct timer_list mayday_timer; /* L: SOS timer for dworkers */ + struct ida worker_ida; /* L: for worker IDs */ struct task_struct *trustee; /* L: for gcwq shutdown */ unsigned int trustee_state; /* L: trustee state */ wait_queue_head_t trustee_wait; /* trustee wait */ + struct worker *first_idle; /* L: first idle worker */ } ____cacheline_aligned_in_smp; /* @@ -121,7 +162,6 @@ struct global_cwq { */ struct cpu_workqueue_struct { struct global_cwq *gcwq; /* I: the associated gcwq */ - struct worker *worker; struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ int flush_color; /* L: flushing color */ @@ -160,6 +200,9 @@ struct workqueue_struct { unsigned long single_cpu; /* cpu for single cpu wq */ + cpumask_var_t mayday_mask; /* cpus requesting rescue */ + struct worker *rescuer; /* I: rescue worker */ + int saved_max_active; /* I: saved cwq max_active */ const char *name; /* I: workqueue name */ #ifdef CONFIG_LOCKDEP @@ -286,7 +329,13 @@ static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); static bool workqueue_freezing; /* W: have wqs started freezing? */ +/* + * The almighty global cpu workqueues. nr_running is the only field + * which is expected to be used frequently by other cpus via + * try_to_wake_up(). Put it in a separate cacheline. + */ static DEFINE_PER_CPU(struct global_cwq, global_cwq); +static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running); static int worker_thread(void *__worker); @@ -295,6 +344,11 @@ static struct global_cwq *get_gcwq(unsigned int cpu) return &per_cpu(global_cwq, cpu); } +static atomic_t *get_gcwq_nr_running(unsigned int cpu) +{ + return &per_cpu(gcwq_nr_running, cpu); +} + static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, struct workqueue_struct *wq) { @@ -385,6 +439,63 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) return get_gcwq(cpu); } +/* + * Policy functions. These define the policies on how the global + * worker pool is managed. Unless noted otherwise, these functions + * assume that they're being called with gcwq->lock held. + */ + +/* + * Need to wake up a worker? Called from anything but currently + * running workers. 
+ */ +static bool need_more_worker(struct global_cwq *gcwq) +{ + atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); + + return !list_empty(&gcwq->worklist) && !atomic_read(nr_running); +} + +/* Can I start working? Called from busy but !running workers. */ +static bool may_start_working(struct global_cwq *gcwq) +{ + return gcwq->nr_idle; +} + +/* Do I need to keep working? Called from currently running workers. */ +static bool keep_working(struct global_cwq *gcwq) +{ + atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); + + return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1; +} + +/* Do we need a new worker? Called from manager. */ +static bool need_to_create_worker(struct global_cwq *gcwq) +{ + return need_more_worker(gcwq) && !may_start_working(gcwq); +} + +/* Do I need to be the manager? */ +static bool need_to_manage_workers(struct global_cwq *gcwq) +{ + return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS; +} + +/* Do we have too many workers and should some go away? */ +static bool too_many_workers(struct global_cwq *gcwq) +{ + bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS; + int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */ + int nr_busy = gcwq->nr_workers - nr_idle; + + return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; +} + +/* + * Wake up functions. + */ + /* Return the first worker. Safe with preemption disabled */ static struct worker *first_worker(struct global_cwq *gcwq) { @@ -412,12 +523,77 @@ static void wake_up_worker(struct global_cwq *gcwq) } /** - * worker_set_flags - set worker flags + * wq_worker_waking_up - a worker is waking up + * @task: task waking up + * @cpu: CPU @task is waking up to + * + * This function is called during try_to_wake_up() when a worker is + * being awoken. + * + * CONTEXT: + * spin_lock_irq(rq->lock) + */ +void wq_worker_waking_up(struct task_struct *task, unsigned int cpu) +{ + struct worker *worker = kthread_data(task); + + if (likely(!(worker->flags & WORKER_NOT_RUNNING))) + atomic_inc(get_gcwq_nr_running(cpu)); +} + +/** + * wq_worker_sleeping - a worker is going to sleep + * @task: task going to sleep + * @cpu: CPU in question, must be the current CPU number + * + * This function is called during schedule() when a busy worker is + * going to sleep. Worker on the same cpu can be woken up by + * returning pointer to its task. + * + * CONTEXT: + * spin_lock_irq(rq->lock) + * + * RETURNS: + * Worker task on @cpu to wake up, %NULL if none. + */ +struct task_struct *wq_worker_sleeping(struct task_struct *task, + unsigned int cpu) +{ + struct worker *worker = kthread_data(task), *to_wakeup = NULL; + struct global_cwq *gcwq = get_gcwq(cpu); + atomic_t *nr_running = get_gcwq_nr_running(cpu); + + if (unlikely(worker->flags & WORKER_NOT_RUNNING)) + return NULL; + + /* this can only happen on the local cpu */ + BUG_ON(cpu != raw_smp_processor_id()); + + /* + * The counterpart of the following dec_and_test, implied mb, + * worklist not empty test sequence is in insert_work(). + * Please read comment there. + * + * NOT_RUNNING is clear. This means that trustee is not in + * charge and we're running on the local cpu w/ rq lock held + * and preemption disabled, which in turn means that none else + * could be manipulating idle_list, so dereferencing idle_list + * without gcwq lock is safe. + */ + if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist)) + to_wakeup = first_worker(gcwq); + return to_wakeup ? 
to_wakeup->task : NULL; +} + +/** + * worker_set_flags - set worker flags and adjust nr_running accordingly * @worker: worker to set flags for * @flags: flags to set * @wakeup: wakeup an idle worker if necessary * - * Set @flags in @worker->flags. + * Set @flags in @worker->flags and adjust nr_running accordingly. If + * nr_running becomes zero and @wakeup is %true, an idle worker is + * woken up. * * LOCKING: * spin_lock_irq(gcwq->lock). @@ -425,22 +601,49 @@ static void wake_up_worker(struct global_cwq *gcwq) static inline void worker_set_flags(struct worker *worker, unsigned int flags, bool wakeup) { + struct global_cwq *gcwq = worker->gcwq; + + /* + * If transitioning into NOT_RUNNING, adjust nr_running and + * wake up an idle worker as necessary if requested by + * @wakeup. + */ + if ((flags & WORKER_NOT_RUNNING) && + !(worker->flags & WORKER_NOT_RUNNING)) { + atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); + + if (wakeup) { + if (atomic_dec_and_test(nr_running) && + !list_empty(&gcwq->worklist)) + wake_up_worker(gcwq); + } else + atomic_dec(nr_running); + } + worker->flags |= flags; } /** - * worker_clr_flags - clear worker flags + * worker_clr_flags - clear worker flags and adjust nr_running accordingly * @worker: worker to set flags for * @flags: flags to clear * - * Clear @flags in @worker->flags. + * Clear @flags in @worker->flags and adjust nr_running accordingly. * * LOCKING: * spin_lock_irq(gcwq->lock). */ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) { + struct global_cwq *gcwq = worker->gcwq; + unsigned int oflags = worker->flags; + worker->flags &= ~flags; + + /* if transitioning out of NOT_RUNNING, increment nr_running */ + if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) + if (!(worker->flags & WORKER_NOT_RUNNING)) + atomic_inc(get_gcwq_nr_running(gcwq->cpu)); } /** @@ -540,6 +743,8 @@ static void insert_work(struct cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, unsigned int extra_flags) { + struct global_cwq *gcwq = cwq->gcwq; + /* we own @work, set data and link */ set_work_cwq(work, cwq, extra_flags); @@ -550,7 +755,16 @@ static void insert_work(struct cpu_workqueue_struct *cwq, smp_wmb(); list_add_tail(&work->entry, head); - wake_up_worker(cwq->gcwq); + + /* + * Ensure either worker_sched_deactivated() sees the above + * list_add_tail() or we see zero nr_running to avoid workers + * lying around lazily while there are works to be processed. 
+ */ + smp_mb(); + + if (!atomic_read(get_gcwq_nr_running(gcwq->cpu))) + wake_up_worker(gcwq); } /** @@ -810,11 +1024,16 @@ static void worker_enter_idle(struct worker *worker) worker_set_flags(worker, WORKER_IDLE, false); gcwq->nr_idle++; + worker->last_active = jiffies; /* idle_list is LIFO */ list_add(&worker->entry, &gcwq->idle_list); - if (unlikely(worker->flags & WORKER_ROGUE)) + if (likely(!(worker->flags & WORKER_ROGUE))) { + if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer)) + mod_timer(&gcwq->idle_timer, + jiffies + IDLE_WORKER_TIMEOUT); + } else wake_up_all(&gcwq->trustee_wait); } @@ -837,6 +1056,81 @@ static void worker_leave_idle(struct worker *worker) list_del_init(&worker->entry); } +/** + * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq + * @worker: self + * + * Works which are scheduled while the cpu is online must at least be + * scheduled to a worker which is bound to the cpu so that if they are + * flushed from cpu callbacks while cpu is going down, they are + * guaranteed to execute on the cpu. + * + * This function is to be used by rogue workers and rescuers to bind + * themselves to the target cpu and may race with cpu going down or + * coming online. kthread_bind() can't be used because it may put the + * worker to already dead cpu and set_cpus_allowed_ptr() can't be used + * verbatim as it's best effort and blocking and gcwq may be + * [dis]associated in the meantime. + * + * This function tries set_cpus_allowed() and locks gcwq and verifies + * the binding against GCWQ_DISASSOCIATED which is set during + * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters + * idle state or fetches works without dropping lock, it can guarantee + * the scheduling requirement described in the first paragraph. + * + * CONTEXT: + * Might sleep. Called without any lock but returns with gcwq->lock + * held. + * + * RETURNS: + * %true if the associated gcwq is online (@worker is successfully + * bound), %false if offline. + */ +static bool worker_maybe_bind_and_lock(struct worker *worker) +{ + struct global_cwq *gcwq = worker->gcwq; + struct task_struct *task = worker->task; + + while (true) { + /* + * The following call may fail, succeed or succeed + * without actually migrating the task to the cpu if + * it races with cpu hotunplug operation. Verify + * against GCWQ_DISASSOCIATED. + */ + set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu)); + + spin_lock_irq(&gcwq->lock); + if (gcwq->flags & GCWQ_DISASSOCIATED) + return false; + if (task_cpu(task) == gcwq->cpu && + cpumask_equal(¤t->cpus_allowed, + get_cpu_mask(gcwq->cpu))) + return true; + spin_unlock_irq(&gcwq->lock); + + /* CPU has come up inbetween, retry migration */ + cpu_relax(); + } +} + +/* + * Function for worker->rebind_work used to rebind rogue busy workers + * to the associated cpu which is coming back online. This is + * scheduled by cpu up but can race with other cpu hotplug operations + * and may be executed twice without intervening cpu down. 
+ */ +static void worker_rebind_fn(struct work_struct *work) +{ + struct worker *worker = container_of(work, struct worker, rebind_work); + struct global_cwq *gcwq = worker->gcwq; + + if (worker_maybe_bind_and_lock(worker)) + worker_clr_flags(worker, WORKER_REBIND); + + spin_unlock_irq(&gcwq->lock); +} + static struct worker *alloc_worker(void) { struct worker *worker; @@ -845,6 +1139,9 @@ static struct worker *alloc_worker(void) if (worker) { INIT_LIST_HEAD(&worker->entry); INIT_LIST_HEAD(&worker->scheduled); + INIT_WORK(&worker->rebind_work, worker_rebind_fn); + /* on creation a worker is in !idle && prep state */ + worker->flags = WORKER_PREP; } return worker; } @@ -963,6 +1260,220 @@ static void destroy_worker(struct worker *worker) ida_remove(&gcwq->worker_ida, id); } +static void idle_worker_timeout(unsigned long __gcwq) +{ + struct global_cwq *gcwq = (void *)__gcwq; + + spin_lock_irq(&gcwq->lock); + + if (too_many_workers(gcwq)) { + struct worker *worker; + unsigned long expires; + + /* idle_list is kept in LIFO order, check the last one */ + worker = list_entry(gcwq->idle_list.prev, struct worker, entry); + expires = worker->last_active + IDLE_WORKER_TIMEOUT; + + if (time_before(jiffies, expires)) + mod_timer(&gcwq->idle_timer, expires); + else { + /* it's been idle for too long, wake up manager */ + gcwq->flags |= GCWQ_MANAGE_WORKERS; + wake_up_worker(gcwq); + } + } + + spin_unlock_irq(&gcwq->lock); +} + +static bool send_mayday(struct work_struct *work) +{ + struct cpu_workqueue_struct *cwq = get_work_cwq(work); + struct workqueue_struct *wq = cwq->wq; + + if (!(wq->flags & WQ_RESCUER)) + return false; + + /* mayday mayday mayday */ + if (!cpumask_test_and_set_cpu(cwq->gcwq->cpu, wq->mayday_mask)) + wake_up_process(wq->rescuer->task); + return true; +} + +static void gcwq_mayday_timeout(unsigned long __gcwq) +{ + struct global_cwq *gcwq = (void *)__gcwq; + struct work_struct *work; + + spin_lock_irq(&gcwq->lock); + + if (need_to_create_worker(gcwq)) { + /* + * We've been trying to create a new worker but + * haven't been successful. We might be hitting an + * allocation deadlock. Send distress signals to + * rescuers. + */ + list_for_each_entry(work, &gcwq->worklist, entry) + send_mayday(work); + } + + spin_unlock_irq(&gcwq->lock); + + mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL); +} + +/** + * maybe_create_worker - create a new worker if necessary + * @gcwq: gcwq to create a new worker for + * + * Create a new worker for @gcwq if necessary. @gcwq is guaranteed to + * have at least one idle worker on return from this function. If + * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is + * sent to all rescuers with works scheduled on @gcwq to resolve + * possible allocation deadlock. + * + * On return, need_to_create_worker() is guaranteed to be false and + * may_start_working() true. + * + * LOCKING: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. Does GFP_KERNEL allocations. Called only from + * manager. + * + * RETURNS: + * false if no action was taken and gcwq->lock stayed locked, true + * otherwise. 
+ */ +static bool maybe_create_worker(struct global_cwq *gcwq) +{ + if (!need_to_create_worker(gcwq)) + return false; +restart: + /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ + mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); + + while (true) { + struct worker *worker; + + spin_unlock_irq(&gcwq->lock); + + worker = create_worker(gcwq, true); + if (worker) { + del_timer_sync(&gcwq->mayday_timer); + spin_lock_irq(&gcwq->lock); + start_worker(worker); + BUG_ON(need_to_create_worker(gcwq)); + return true; + } + + if (!need_to_create_worker(gcwq)) + break; + + spin_unlock_irq(&gcwq->lock); + __set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(CREATE_COOLDOWN); + spin_lock_irq(&gcwq->lock); + if (!need_to_create_worker(gcwq)) + break; + } + + spin_unlock_irq(&gcwq->lock); + del_timer_sync(&gcwq->mayday_timer); + spin_lock_irq(&gcwq->lock); + if (need_to_create_worker(gcwq)) + goto restart; + return true; +} + +/** + * maybe_destroy_worker - destroy workers which have been idle for a while + * @gcwq: gcwq to destroy workers for + * + * Destroy @gcwq workers which have been idle for longer than + * IDLE_WORKER_TIMEOUT. + * + * LOCKING: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. Called only from manager. + * + * RETURNS: + * false if no action was taken and gcwq->lock stayed locked, true + * otherwise. + */ +static bool maybe_destroy_workers(struct global_cwq *gcwq) +{ + bool ret = false; + + while (too_many_workers(gcwq)) { + struct worker *worker; + unsigned long expires; + + worker = list_entry(gcwq->idle_list.prev, struct worker, entry); + expires = worker->last_active + IDLE_WORKER_TIMEOUT; + + if (time_before(jiffies, expires)) { + mod_timer(&gcwq->idle_timer, expires); + break; + } + + destroy_worker(worker); + ret = true; + } + + return ret; +} + +/** + * manage_workers - manage worker pool + * @worker: self + * + * Assume the manager role and manage gcwq worker pool @worker belongs + * to. At any given time, there can be only zero or one manager per + * gcwq. The exclusion is handled automatically by this function. + * + * The caller can safely start processing works on false return. On + * true return, it's guaranteed that need_to_create_worker() is false + * and may_start_working() is true. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. Does GFP_KERNEL allocations. + * + * RETURNS: + * false if no action was taken and gcwq->lock stayed locked, true if + * some action was taken. + */ +static bool manage_workers(struct worker *worker) +{ + struct global_cwq *gcwq = worker->gcwq; + bool ret = false; + + if (gcwq->flags & GCWQ_MANAGING_WORKERS) + return ret; + + gcwq->flags &= ~GCWQ_MANAGE_WORKERS; + gcwq->flags |= GCWQ_MANAGING_WORKERS; + + /* + * Destroy and then create so that may_start_working() is true + * on return. + */ + ret |= maybe_destroy_workers(gcwq); + ret |= maybe_create_worker(gcwq); + + gcwq->flags &= ~GCWQ_MANAGING_WORKERS; + + /* + * The trustee might be waiting to take over the manager + * position, tell it we're done. + */ + if (unlikely(gcwq->trustee)) + wake_up_all(&gcwq->trustee_wait); + + return ret; +} + /** * move_linked_works - move linked works to a list * @work: start of series of works to be scheduled @@ -1169,24 +1680,39 @@ static void process_scheduled_works(struct worker *worker) * worker_thread - the worker thread function * @__worker: self * - * The cwq worker thread function. 
+ * The gcwq worker thread function. There's a single dynamic pool of + * these per each cpu. These workers process all works regardless of + * their specific target workqueue. The only exception is works which + * belong to workqueues with a rescuer which will be explained in + * rescuer_thread(). */ static int worker_thread(void *__worker) { struct worker *worker = __worker; struct global_cwq *gcwq = worker->gcwq; + /* tell the scheduler that this is a workqueue worker */ + worker->task->flags |= PF_WQ_WORKER; woke_up: spin_lock_irq(&gcwq->lock); /* DIE can be set only while we're idle, checking here is enough */ if (worker->flags & WORKER_DIE) { spin_unlock_irq(&gcwq->lock); + worker->task->flags &= ~PF_WQ_WORKER; return 0; } worker_leave_idle(worker); recheck: + /* no more worker necessary? */ + if (!need_more_worker(gcwq)) + goto sleep; + + /* do we need to manage? */ + if (unlikely(!may_start_working(gcwq)) && manage_workers(worker)) + goto recheck; + /* * ->scheduled list can only be filled while a worker is * preparing to process a work or actually processing it. @@ -1194,27 +1720,18 @@ recheck: */ BUG_ON(!list_empty(&worker->scheduled)); - while (!list_empty(&gcwq->worklist)) { + /* + * When control reaches this point, we're guaranteed to have + * at least one idle worker or that someone else has already + * assumed the manager role. + */ + worker_clr_flags(worker, WORKER_PREP); + + do { struct work_struct *work = list_first_entry(&gcwq->worklist, struct work_struct, entry); - /* - * The following is a rather inefficient way to close - * race window against cpu hotplug operations. Will - * be replaced soon. - */ - if (unlikely(!(worker->flags & WORKER_ROGUE) && - !cpumask_equal(&worker->task->cpus_allowed, - get_cpu_mask(gcwq->cpu)))) { - spin_unlock_irq(&gcwq->lock); - set_cpus_allowed_ptr(worker->task, - get_cpu_mask(gcwq->cpu)); - cpu_relax(); - spin_lock_irq(&gcwq->lock); - goto recheck; - } - if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { /* optimization path, not strictly necessary */ process_one_work(worker, work); @@ -1224,13 +1741,19 @@ recheck: move_linked_works(work, &worker->scheduled, NULL); process_scheduled_works(worker); } - } + } while (keep_working(gcwq)); + + worker_set_flags(worker, WORKER_PREP, false); + if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker)) + goto recheck; +sleep: /* - * gcwq->lock is held and there's no work to process, sleep. - * Workers are woken up only while holding gcwq->lock, so - * setting the current state before releasing gcwq->lock is - * enough to prevent losing any event. + * gcwq->lock is held and there's no work to process and no + * need to manage, sleep. Workers are woken up only while + * holding gcwq->lock or from local cpu, so setting the + * current state before releasing gcwq->lock is enough to + * prevent losing any event. */ worker_enter_idle(worker); __set_current_state(TASK_INTERRUPTIBLE); @@ -1239,6 +1762,68 @@ recheck: goto woke_up; } +/** + * rescuer_thread - the rescuer thread function + * @__wq: the associated workqueue + * + * Workqueue rescuer thread function. There's one rescuer for each + * workqueue which has WQ_RESCUER set. + * + * Regular work processing on a gcwq may block trying to create a new + * worker which uses GFP_KERNEL allocation which has slight chance of + * developing into deadlock if some works currently on the same queue + * need to be processed to satisfy the GFP_KERNEL allocation. This is + * the problem rescuer solves. 
+ * + * When such condition is possible, the gcwq summons rescuers of all + * workqueues which have works queued on the gcwq and let them process + * those works so that forward progress can be guaranteed. + * + * This should happen rarely. + */ +static int rescuer_thread(void *__wq) +{ + struct workqueue_struct *wq = __wq; + struct worker *rescuer = wq->rescuer; + struct list_head *scheduled = &rescuer->scheduled; + unsigned int cpu; + + set_user_nice(current, RESCUER_NICE_LEVEL); +repeat: + set_current_state(TASK_INTERRUPTIBLE); + + if (kthread_should_stop()) + return 0; + + for_each_cpu(cpu, wq->mayday_mask) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct global_cwq *gcwq = cwq->gcwq; + struct work_struct *work, *n; + + __set_current_state(TASK_RUNNING); + cpumask_clear_cpu(cpu, wq->mayday_mask); + + /* migrate to the target cpu if possible */ + rescuer->gcwq = gcwq; + worker_maybe_bind_and_lock(rescuer); + + /* + * Slurp in all works issued via this workqueue and + * process'em. + */ + BUG_ON(!list_empty(&rescuer->scheduled)); + list_for_each_entry_safe(work, n, &gcwq->worklist, entry) + if (get_work_cwq(work) == cwq) + move_linked_works(work, scheduled, &n); + + process_scheduled_works(rescuer); + spin_unlock_irq(&gcwq->lock); + } + + schedule(); + goto repeat; +} + struct wq_barrier { struct work_struct work; struct completion done; @@ -1998,7 +2583,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, const char *lock_name) { struct workqueue_struct *wq; - bool failed = false; unsigned int cpu; max_active = clamp_val(max_active, 1, INT_MAX); @@ -2023,13 +2607,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); - cpu_maps_update_begin(); - /* - * We must initialize cwqs for each possible cpu even if we - * are going to call destroy_workqueue() finally. Otherwise - * cpu_up() can hit the uninitialized cwq once we drop the - * lock. - */ for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct global_cwq *gcwq = get_gcwq(cpu); @@ -2040,14 +2617,25 @@ struct workqueue_struct *__create_workqueue_key(const char *name, cwq->flush_color = -1; cwq->max_active = max_active; INIT_LIST_HEAD(&cwq->delayed_works); + } - if (failed) - continue; - cwq->worker = create_worker(gcwq, cpu_online(cpu)); - if (cwq->worker) - start_worker(cwq->worker); - else - failed = true; + if (flags & WQ_RESCUER) { + struct worker *rescuer; + + if (!alloc_cpumask_var(&wq->mayday_mask, GFP_KERNEL)) + goto err; + + wq->rescuer = rescuer = alloc_worker(); + if (!rescuer) + goto err; + + rescuer->task = kthread_create(rescuer_thread, wq, "%s", name); + if (IS_ERR(rescuer->task)) + goto err; + + wq->rescuer = rescuer; + rescuer->task->flags |= PF_THREAD_BOUND; + wake_up_process(rescuer->task); } /* @@ -2065,16 +2653,12 @@ struct workqueue_struct *__create_workqueue_key(const char *name, spin_unlock(&workqueue_lock); - cpu_maps_update_done(); - - if (failed) { - destroy_workqueue(wq); - wq = NULL; - } return wq; err: if (wq) { free_cwqs(wq->cpu_wq); + free_cpumask_var(wq->mayday_mask); + kfree(wq->rescuer); kfree(wq); } return NULL; @@ -2097,42 +2681,26 @@ void destroy_workqueue(struct workqueue_struct *wq) * wq list is used to freeze wq, remove from list after * flushing is complete in case freeze races us. 
*/ - cpu_maps_update_begin(); spin_lock(&workqueue_lock); list_del(&wq->list); spin_unlock(&workqueue_lock); - cpu_maps_update_done(); + /* sanity check */ for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - struct global_cwq *gcwq = cwq->gcwq; int i; - if (cwq->worker) { - retry: - spin_lock_irq(&gcwq->lock); - /* - * Worker can only be destroyed while idle. - * Wait till it becomes idle. This is ugly - * and prone to starvation. It will go away - * once dynamic worker pool is implemented. - */ - if (!(cwq->worker->flags & WORKER_IDLE)) { - spin_unlock_irq(&gcwq->lock); - msleep(100); - goto retry; - } - destroy_worker(cwq->worker); - cwq->worker = NULL; - spin_unlock_irq(&gcwq->lock); - } - for (i = 0; i < WORK_NR_COLORS; i++) BUG_ON(cwq->nr_in_flight[i]); BUG_ON(cwq->nr_active); BUG_ON(!list_empty(&cwq->delayed_works)); } + if (wq->flags & WQ_RESCUER) { + kthread_stop(wq->rescuer->task); + free_cpumask_var(wq->mayday_mask); + } + free_cwqs(wq->cpu_wq); kfree(wq); } @@ -2141,10 +2709,18 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); /* * CPU hotplug. * - * CPU hotplug is implemented by allowing cwqs to be detached from - * CPU, running with unbound workers and allowing them to be - * reattached later if the cpu comes back online. A separate thread - * is created to govern cwqs in such state and is called the trustee. + * There are two challenges in supporting CPU hotplug. Firstly, there + * are a lot of assumptions on strong associations among work, cwq and + * gcwq which make migrating pending and scheduled works very + * difficult to implement without impacting hot paths. Secondly, + * gcwqs serve mix of short, long and very long running works making + * blocked draining impractical. + * + * This is solved by allowing a gcwq to be detached from CPU, running + * it with unbound (rogue) workers and allowing it to be reattached + * later if the cpu comes back online. A separate thread is created + * to govern a gcwq in such state and is called the trustee of the + * gcwq. * * Trustee states and their descriptions. * @@ -2152,11 +2728,12 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); * new trustee is started with this state. * * IN_CHARGE Once started, trustee will enter this state after - * making all existing workers rogue. DOWN_PREPARE waits - * for trustee to enter this state. After reaching - * IN_CHARGE, trustee tries to execute the pending - * worklist until it's empty and the state is set to - * BUTCHER, or the state is set to RELEASE. + * assuming the manager role and making all existing + * workers rogue. DOWN_PREPARE waits for trustee to + * enter this state. After reaching IN_CHARGE, trustee + * tries to execute the pending worklist until it's empty + * and the state is set to BUTCHER, or the state is set + * to RELEASE. * * BUTCHER Command state which is set by the cpu callback after * the cpu has went down. Once this state is set trustee @@ -2167,7 +2744,9 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); * RELEASE Command state which is set by the cpu callback if the * cpu down has been canceled or it has come online * again. After recognizing this state, trustee stops - * trying to drain or butcher and transits to DONE. + * trying to drain or butcher and clears ROGUE, rebinds + * all remaining workers back to the cpu and releases + * manager role. * * DONE Trustee will enter this state after BUTCHER or RELEASE * is complete. 
@@ -2233,17 +2812,24 @@ static int __cpuinit trustee_thread(void *__gcwq) { struct global_cwq *gcwq = __gcwq; struct worker *worker; + struct work_struct *work; struct hlist_node *pos; + long rc; int i; BUG_ON(gcwq->cpu != smp_processor_id()); spin_lock_irq(&gcwq->lock); /* - * Make all workers rogue. Trustee must be bound to the - * target cpu and can't be cancelled. + * Claim the manager position and make all workers rogue. + * Trustee must be bound to the target cpu and can't be + * cancelled. */ BUG_ON(gcwq->cpu != smp_processor_id()); + rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS)); + BUG_ON(rc < 0); + + gcwq->flags |= GCWQ_MANAGING_WORKERS; list_for_each_entry(worker, &gcwq->idle_list, entry) worker_set_flags(worker, WORKER_ROGUE, false); @@ -2252,6 +2838,28 @@ static int __cpuinit trustee_thread(void *__gcwq) worker_set_flags(worker, WORKER_ROGUE, false); /* + * Call schedule() so that we cross rq->lock and thus can + * guarantee sched callbacks see the rogue flag. This is + * necessary as scheduler callbacks may be invoked from other + * cpus. + */ + spin_unlock_irq(&gcwq->lock); + schedule(); + spin_lock_irq(&gcwq->lock); + + /* + * Sched callbacks are disabled now. gcwq->nr_running should + * be zero and will stay that way, making need_more_worker() + * and keep_working() always return true as long as the + * worklist is not empty. + */ + WARN_ON_ONCE(atomic_read(get_gcwq_nr_running(gcwq->cpu)) != 0); + + spin_unlock_irq(&gcwq->lock); + del_timer_sync(&gcwq->idle_timer); + spin_lock_irq(&gcwq->lock); + + /* * We're now in charge. Notify and proceed to drain. We need * to keep the gcwq running during the whole CPU down * procedure as other cpu hotunplug callbacks may need to @@ -2263,18 +2871,90 @@ static int __cpuinit trustee_thread(void *__gcwq) /* * The original cpu is in the process of dying and may go away * anytime now. When that happens, we and all workers would - * be migrated to other cpus. Try draining any left work. - * Note that if the gcwq is frozen, there may be frozen works - * in freezeable cwqs. Don't declare completion while frozen. + * be migrated to other cpus. Try draining any left work. We + * want to get it over with ASAP - spam rescuers, wake up as + * many idlers as necessary and create new ones till the + * worklist is empty. Note that if the gcwq is frozen, there + * may be frozen works in freezeable cwqs. Don't declare + * completion while frozen. */ while (gcwq->nr_workers != gcwq->nr_idle || gcwq->flags & GCWQ_FREEZING || gcwq->trustee_state == TRUSTEE_IN_CHARGE) { + int nr_works = 0; + + list_for_each_entry(work, &gcwq->worklist, entry) { + send_mayday(work); + nr_works++; + } + + list_for_each_entry(worker, &gcwq->idle_list, entry) { + if (!nr_works--) + break; + wake_up_process(worker->task); + } + + if (need_to_create_worker(gcwq)) { + spin_unlock_irq(&gcwq->lock); + worker = create_worker(gcwq, false); + spin_lock_irq(&gcwq->lock); + if (worker) { + worker_set_flags(worker, WORKER_ROGUE, false); + start_worker(worker); + } + } + /* give a breather */ if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0) break; } + /* + * Either all works have been scheduled and cpu is down, or + * cpu down has already been canceled. Wait for and butcher + * all workers till we're canceled. 
+ */ + do { + rc = trustee_wait_event(!list_empty(&gcwq->idle_list)); + while (!list_empty(&gcwq->idle_list)) + destroy_worker(list_first_entry(&gcwq->idle_list, + struct worker, entry)); + } while (gcwq->nr_workers && rc >= 0); + + /* + * At this point, either draining has completed and no worker + * is left, or cpu down has been canceled or the cpu is being + * brought back up. There shouldn't be any idle one left. + * Tell the remaining busy ones to rebind once it finishes the + * currently scheduled works by scheduling the rebind_work. + */ + WARN_ON(!list_empty(&gcwq->idle_list)); + + for_each_busy_worker(worker, i, pos, gcwq) { + struct work_struct *rebind_work = &worker->rebind_work; + + /* + * Rebind_work may race with future cpu hotplug + * operations. Use a separate flag to mark that + * rebinding is scheduled. + */ + worker_set_flags(worker, WORKER_REBIND, false); + worker_clr_flags(worker, WORKER_ROGUE); + + /* queue rebind_work, wq doesn't matter, use the default one */ + if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, + work_data_bits(rebind_work))) + continue; + + debug_work_activate(rebind_work); + insert_work(get_cwq(gcwq->cpu, keventd_wq), rebind_work, + worker->scheduled.next, + work_color_to_flags(WORK_NO_COLOR)); + } + + /* relinquish manager role */ + gcwq->flags &= ~GCWQ_MANAGING_WORKERS; + /* notify completion */ gcwq->trustee = NULL; gcwq->trustee_state = TRUSTEE_DONE; @@ -2313,10 +2993,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, unsigned int cpu = (unsigned long)hcpu; struct global_cwq *gcwq = get_gcwq(cpu); struct task_struct *new_trustee = NULL; - struct worker *worker; - struct hlist_node *pos; + struct worker *uninitialized_var(new_worker); unsigned long flags; - int i; action &= ~CPU_TASKS_FROZEN; @@ -2327,6 +3005,15 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, if (IS_ERR(new_trustee)) return notifier_from_errno(PTR_ERR(new_trustee)); kthread_bind(new_trustee, cpu); + /* fall through */ + case CPU_UP_PREPARE: + BUG_ON(gcwq->first_idle); + new_worker = create_worker(gcwq, false); + if (!new_worker) { + if (new_trustee) + kthread_stop(new_trustee); + return NOTIFY_BAD; + } } /* some are called w/ irq disabled, don't disturb irq status */ @@ -2340,26 +3027,50 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, gcwq->trustee_state = TRUSTEE_START; wake_up_process(gcwq->trustee); wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); + /* fall through */ + case CPU_UP_PREPARE: + BUG_ON(gcwq->first_idle); + gcwq->first_idle = new_worker; + break; + + case CPU_DYING: + /* + * Before this, the trustee and all workers except for + * the ones which are still executing works from + * before the last CPU down must be on the cpu. After + * this, they'll all be diasporas. + */ + gcwq->flags |= GCWQ_DISASSOCIATED; break; case CPU_POST_DEAD: gcwq->trustee_state = TRUSTEE_BUTCHER; + /* fall through */ + case CPU_UP_CANCELED: + destroy_worker(gcwq->first_idle); + gcwq->first_idle = NULL; break; case CPU_DOWN_FAILED: case CPU_ONLINE: + gcwq->flags &= ~GCWQ_DISASSOCIATED; if (gcwq->trustee_state != TRUSTEE_DONE) { gcwq->trustee_state = TRUSTEE_RELEASE; wake_up_process(gcwq->trustee); wait_trustee_state(gcwq, TRUSTEE_DONE); } - /* clear ROGUE from all workers */ - list_for_each_entry(worker, &gcwq->idle_list, entry) - worker_clr_flags(worker, WORKER_ROGUE); - - for_each_busy_worker(worker, i, pos, gcwq) - worker_clr_flags(worker, WORKER_ROGUE); + /* + * Trustee is done and there might be no worker left. 
+ * Put the first_idle in and request a real manager to + * take a look. + */ + spin_unlock_irq(&gcwq->lock); + kthread_bind(gcwq->first_idle->task, cpu); + spin_lock_irq(&gcwq->lock); + gcwq->flags |= GCWQ_MANAGE_WORKERS; + start_worker(gcwq->first_idle); + gcwq->first_idle = NULL; break; } @@ -2548,10 +3259,10 @@ void thaw_workqueues(void) if (wq->single_cpu == gcwq->cpu && !cwq->nr_active && list_empty(&cwq->delayed_works)) cwq_unbind_single_cpu(cwq); - - wake_up_process(cwq->worker->task); } + wake_up_worker(gcwq); + spin_unlock_irq(&gcwq->lock); } @@ -2588,12 +3299,31 @@ void __init init_workqueues(void) for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) INIT_HLIST_HEAD(&gcwq->busy_hash[i]); + init_timer_deferrable(&gcwq->idle_timer); + gcwq->idle_timer.function = idle_worker_timeout; + gcwq->idle_timer.data = (unsigned long)gcwq; + + setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout, + (unsigned long)gcwq); + ida_init(&gcwq->worker_ida); gcwq->trustee_state = TRUSTEE_DONE; init_waitqueue_head(&gcwq->trustee_wait); } + /* create the initial worker */ + for_each_online_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + struct worker *worker; + + worker = create_worker(gcwq, true); + BUG_ON(!worker); + spin_lock_irq(&gcwq->lock); + start_worker(worker); + spin_unlock_irq(&gcwq->lock); + } + keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); } diff --git a/kernel/workqueue_sched.h b/kernel/workqueue_sched.h index af040ba..2d10fc9 100644 --- a/kernel/workqueue_sched.h +++ b/kernel/workqueue_sched.h @@ -4,13 +4,6 @@ * Scheduler hooks for concurrency managed workqueue. Only to be * included from sched.c and workqueue.c. */ -static inline void wq_worker_waking_up(struct task_struct *task, - unsigned int cpu) -{ -} - -static inline struct task_struct *wq_worker_sleeping(struct task_struct *task, - unsigned int cpu) -{ - return NULL; -} +void wq_worker_waking_up(struct task_struct *task, unsigned int cpu); +struct task_struct *wq_worker_sleeping(struct task_struct *task, + unsigned int cpu); -- cgit v1.1 From b71ab8c2025caef8db719aa41af0ed735dc543cd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:14 +0200 Subject: workqueue: increase max_active of keventd and kill current_is_keventd() Define WQ_MAX_ACTIVE and create keventd with max_active set to half of it which means that keventd now can process upto WQ_MAX_ACTIVE / 2 - 1 works concurrently. Unless some combination can result in dependency loop longer than max_active, deadlock won't happen and thus it's unnecessary to check whether current_is_keventd() before trying to schedule a work. Kill current_is_keventd(). (Lockdep annotations are broken. We need lock_map_acquire_read_norecurse()) Signed-off-by: Tejun Heo Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Christoph Lameter Cc: Tony Luck Cc: Andi Kleen Cc: Oleg Nesterov --- kernel/workqueue.c | 63 +++++++++++------------------------------------------- 1 file changed, 13 insertions(+), 50 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0ad4652..4190e84 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2398,7 +2398,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on); int schedule_on_each_cpu(work_func_t func) { int cpu; - int orig = -1; struct work_struct *works; works = alloc_percpu(struct work_struct); @@ -2407,23 +2406,12 @@ int schedule_on_each_cpu(work_func_t func) get_online_cpus(); - /* - * When running in keventd don't schedule a work item on - * itself. 
Can just call directly because the work queue is - * already bound. This also is faster. - */ - if (current_is_keventd()) - orig = raw_smp_processor_id(); - for_each_online_cpu(cpu) { struct work_struct *work = per_cpu_ptr(works, cpu); INIT_WORK(work, func); - if (cpu != orig) - schedule_work_on(cpu, work); + schedule_work_on(cpu, work); } - if (orig >= 0) - func(per_cpu_ptr(works, orig)); for_each_online_cpu(cpu) flush_work(per_cpu_ptr(works, cpu)); @@ -2494,41 +2482,6 @@ int keventd_up(void) return keventd_wq != NULL; } -int current_is_keventd(void) -{ - bool found = false; - unsigned int cpu; - - /* - * There no longer is one-to-one relation between worker and - * work queue and a worker task might be unbound from its cpu - * if the cpu was offlined. Match all busy workers. This - * function will go away once dynamic pool is implemented. - */ - for_each_possible_cpu(cpu) { - struct global_cwq *gcwq = get_gcwq(cpu); - struct worker *worker; - struct hlist_node *pos; - unsigned long flags; - int i; - - spin_lock_irqsave(&gcwq->lock, flags); - - for_each_busy_worker(worker, i, pos, gcwq) { - if (worker->task == current) { - found = true; - break; - } - } - - spin_unlock_irqrestore(&gcwq->lock, flags); - if (found) - break; - } - - return found; -} - static struct cpu_workqueue_struct *alloc_cwqs(void) { /* @@ -2576,6 +2529,16 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs) #endif } +static int wq_clamp_max_active(int max_active, const char *name) +{ + if (max_active < 1 || max_active > WQ_MAX_ACTIVE) + printk(KERN_WARNING "workqueue: max_active %d requested for %s " + "is out of range, clamping between %d and %d\n", + max_active, name, 1, WQ_MAX_ACTIVE); + + return clamp_val(max_active, 1, WQ_MAX_ACTIVE); +} + struct workqueue_struct *__create_workqueue_key(const char *name, unsigned int flags, int max_active, @@ -2585,7 +2548,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, struct workqueue_struct *wq; unsigned int cpu; - max_active = clamp_val(max_active, 1, INT_MAX); + max_active = wq_clamp_max_active(max_active, name); wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) @@ -3324,6 +3287,6 @@ void __init init_workqueues(void) spin_unlock_irq(&gcwq->lock); } - keventd_wq = create_workqueue("events"); + keventd_wq = __create_workqueue("events", 0, WQ_DFL_ACTIVE); BUG_ON(!keventd_wq); } -- cgit v1.1 From d320c03830b17af64e4547075003b1eeb274bc6c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:14 +0200 Subject: workqueue: s/__create_workqueue()/alloc_workqueue()/, and add system workqueues This patch makes changes to make new workqueue features available to its users. * Now that workqueue is more featureful, there should be a public workqueue creation function which takes paramters to control them. Rename __create_workqueue() to alloc_workqueue() and make 0 max_active mean WQ_DFL_ACTIVE. In the long run, all create_workqueue_*() will be converted over to alloc_workqueue(). * To further unify access interface, rename keventd_wq to system_wq and export it. * Add system_long_wq and system_nrt_wq. The former is to host long running works separately (so that flush_scheduled_work() dosen't take so long) and the latter guarantees any queued work item is never executed in parallel by multiple CPUs. These will be used by future patches to update workqueue users. 
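From a caller's point of view, the resulting interface is used roughly as sketched below. This is an illustrative example only, not code from the patch; my_wq, my_work_fn and my_driver_init are made-up names.

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static void my_work_fn(struct work_struct *work)
    {
            /* a short piece of deferred work */
    }

    static DECLARE_WORK(my_work, my_work_fn);
    static struct workqueue_struct *my_wq;

    static int my_driver_init(void)
    {
            /* max_active of 0 now means WQ_DFL_ACTIVE */
            my_wq = alloc_workqueue("my_wq", 0, 0);
            if (!my_wq)
                    return -ENOMEM;

            queue_work(my_wq, &my_work);

            /*
             * Alternatively, skip the private workqueue and pick one of
             * the shared system workqueues:
             *
             *   queue_work(system_wq, &my_work);       default (what schedule_work() uses)
             *   queue_work(system_long_wq, &my_work);  works that may run for a long time
             *   queue_work(system_nrt_wq, &my_work);   never executed in parallel on multiple cpus
             */
            return 0;
    }

All three system workqueues are backed by the same per-cpu gcwq worker pools; they differ only in the guarantees noted above.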
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 42 +++++++++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4190e84..16ce617 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -210,6 +210,13 @@ struct workqueue_struct { #endif }; +struct workqueue_struct *system_wq __read_mostly; +struct workqueue_struct *system_long_wq __read_mostly; +struct workqueue_struct *system_nrt_wq __read_mostly; +EXPORT_SYMBOL_GPL(system_wq); +EXPORT_SYMBOL_GPL(system_long_wq); +EXPORT_SYMBOL_GPL(system_nrt_wq); + #define for_each_busy_worker(worker, i, pos, gcwq) \ for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) @@ -2306,8 +2313,6 @@ int cancel_delayed_work_sync(struct delayed_work *dwork) } EXPORT_SYMBOL(cancel_delayed_work_sync); -static struct workqueue_struct *keventd_wq __read_mostly; - /** * schedule_work - put work task in global workqueue * @work: job to be done @@ -2321,7 +2326,7 @@ static struct workqueue_struct *keventd_wq __read_mostly; */ int schedule_work(struct work_struct *work) { - return queue_work(keventd_wq, work); + return queue_work(system_wq, work); } EXPORT_SYMBOL(schedule_work); @@ -2334,7 +2339,7 @@ EXPORT_SYMBOL(schedule_work); */ int schedule_work_on(int cpu, struct work_struct *work) { - return queue_work_on(cpu, keventd_wq, work); + return queue_work_on(cpu, system_wq, work); } EXPORT_SYMBOL(schedule_work_on); @@ -2349,7 +2354,7 @@ EXPORT_SYMBOL(schedule_work_on); int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) { - return queue_delayed_work(keventd_wq, dwork, delay); + return queue_delayed_work(system_wq, dwork, delay); } EXPORT_SYMBOL(schedule_delayed_work); @@ -2382,7 +2387,7 @@ EXPORT_SYMBOL(flush_delayed_work); int schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay) { - return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); + return queue_delayed_work_on(cpu, system_wq, dwork, delay); } EXPORT_SYMBOL(schedule_delayed_work_on); @@ -2447,7 +2452,7 @@ int schedule_on_each_cpu(work_func_t func) */ void flush_scheduled_work(void) { - flush_workqueue(keventd_wq); + flush_workqueue(system_wq); } EXPORT_SYMBOL(flush_scheduled_work); @@ -2479,7 +2484,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context); int keventd_up(void) { - return keventd_wq != NULL; + return system_wq != NULL; } static struct cpu_workqueue_struct *alloc_cwqs(void) @@ -2539,15 +2544,16 @@ static int wq_clamp_max_active(int max_active, const char *name) return clamp_val(max_active, 1, WQ_MAX_ACTIVE); } -struct workqueue_struct *__create_workqueue_key(const char *name, - unsigned int flags, - int max_active, - struct lock_class_key *key, - const char *lock_name) +struct workqueue_struct *__alloc_workqueue_key(const char *name, + unsigned int flags, + int max_active, + struct lock_class_key *key, + const char *lock_name) { struct workqueue_struct *wq; unsigned int cpu; + max_active = max_active ?: WQ_DFL_ACTIVE; max_active = wq_clamp_max_active(max_active, name); wq = kzalloc(sizeof(*wq), GFP_KERNEL); @@ -2626,7 +2632,7 @@ err: } return NULL; } -EXPORT_SYMBOL_GPL(__create_workqueue_key); +EXPORT_SYMBOL_GPL(__alloc_workqueue_key); /** * destroy_workqueue - safely terminate a workqueue @@ -2910,7 +2916,7 @@ static int __cpuinit trustee_thread(void *__gcwq) continue; debug_work_activate(rebind_work); - insert_work(get_cwq(gcwq->cpu, keventd_wq), rebind_work, + 
insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work, worker->scheduled.next, work_color_to_flags(WORK_NO_COLOR)); } @@ -3287,6 +3293,8 @@ void __init init_workqueues(void) spin_unlock_irq(&gcwq->lock); } - keventd_wq = __create_workqueue("events", 0, WQ_DFL_ACTIVE); - BUG_ON(!keventd_wq); + system_wq = alloc_workqueue("events", 0, 0); + system_long_wq = alloc_workqueue("events_long", 0, 0); + system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); + BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq); } -- cgit v1.1 From dcd989cb73ab0f7b722d64ab6516f101d9f43f88 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:14 +0200 Subject: workqueue: implement several utility APIs Implement the following utility APIs. workqueue_set_max_active() : adjust max_active of a wq workqueue_congested() : test whether a wq is contested work_cpu() : determine the last / current cpu of a work work_busy() : query whether a work is busy * Anton Blanchard fixed missing ret initialization in work_busy(). Signed-off-by: Tejun Heo Cc: Anton Blanchard --- kernel/workqueue.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 107 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 16ce617..c1aa65c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -203,7 +203,7 @@ struct workqueue_struct { cpumask_var_t mayday_mask; /* cpus requesting rescue */ struct worker *rescuer; /* I: rescue worker */ - int saved_max_active; /* I: saved cwq max_active */ + int saved_max_active; /* W: saved cwq max_active */ const char *name; /* I: workqueue name */ #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; @@ -2675,6 +2675,112 @@ void destroy_workqueue(struct workqueue_struct *wq) } EXPORT_SYMBOL_GPL(destroy_workqueue); +/** + * workqueue_set_max_active - adjust max_active of a workqueue + * @wq: target workqueue + * @max_active: new max_active value. + * + * Set max_active of @wq to @max_active. + * + * CONTEXT: + * Don't call from IRQ context. + */ +void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) +{ + unsigned int cpu; + + max_active = wq_clamp_max_active(max_active, wq->name); + + spin_lock(&workqueue_lock); + + wq->saved_max_active = max_active; + + for_each_possible_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + + spin_lock_irq(&gcwq->lock); + + if (!(wq->flags & WQ_FREEZEABLE) || + !(gcwq->flags & GCWQ_FREEZING)) + get_cwq(gcwq->cpu, wq)->max_active = max_active; + + spin_unlock_irq(&gcwq->lock); + } + + spin_unlock(&workqueue_lock); +} +EXPORT_SYMBOL_GPL(workqueue_set_max_active); + +/** + * workqueue_congested - test whether a workqueue is congested + * @cpu: CPU in question + * @wq: target workqueue + * + * Test whether @wq's cpu workqueue for @cpu is congested. There is + * no synchronization around this function and the test result is + * unreliable and only useful as advisory hints or for debugging. + * + * RETURNS: + * %true if congested, %false otherwise. + */ +bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) +{ + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + return !list_empty(&cwq->delayed_works); +} +EXPORT_SYMBOL_GPL(workqueue_congested); + +/** + * work_cpu - return the last known associated cpu for @work + * @work: the work of interest + * + * RETURNS: + * CPU number if @work was ever queued. NR_CPUS otherwise. 
+ */ +unsigned int work_cpu(struct work_struct *work) +{ + struct global_cwq *gcwq = get_work_gcwq(work); + + return gcwq ? gcwq->cpu : NR_CPUS; +} +EXPORT_SYMBOL_GPL(work_cpu); + +/** + * work_busy - test whether a work is currently pending or running + * @work: the work to be tested + * + * Test whether @work is currently pending or running. There is no + * synchronization around this function and the test result is + * unreliable and only useful as advisory hints or for debugging. + * Especially for reentrant wqs, the pending state might hide the + * running state. + * + * RETURNS: + * OR'd bitmask of WORK_BUSY_* bits. + */ +unsigned int work_busy(struct work_struct *work) +{ + struct global_cwq *gcwq = get_work_gcwq(work); + unsigned long flags; + unsigned int ret = 0; + + if (!gcwq) + return false; + + spin_lock_irqsave(&gcwq->lock, flags); + + if (work_pending(work)) + ret |= WORK_BUSY_PENDING; + if (find_worker_executing_work(gcwq, work)) + ret |= WORK_BUSY_RUNNING; + + spin_unlock_irqrestore(&gcwq->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(work_busy); + /* * CPU hotplug. * -- cgit v1.1 From 649027d73a6309ac34dc2886362e662bd73456dc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:14 +0200 Subject: workqueue: implement high priority workqueue This patch implements high priority workqueue which can be specified with WQ_HIGHPRI flag on creation. A high priority workqueue has the following properties. * A work queued to it is queued at the head of the worklist of the respective gcwq after other highpri works, while normal works are always appended at the end. * As long as there are highpri works on gcwq->worklist, [__]need_more_worker() remains %true and process_one_work() wakes up another worker before it start executing a work. The above two properties guarantee that works queued to high priority workqueues are dispatched to workers and start execution as soon as possible regardless of the state of other works. Signed-off-by: Tejun Heo Cc: Andi Kleen Cc: Andrew Morton --- kernel/workqueue.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 64 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c1aa65c..5775717 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -43,6 +43,7 @@ enum { GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */ GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ GCWQ_FREEZING = 1 << 3, /* freeze in progress */ + GCWQ_HIGHPRI_PENDING = 1 << 4, /* highpri works on queue */ /* worker flags */ WORKER_STARTED = 1 << 0, /* started */ @@ -452,15 +453,19 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) * assume that they're being called with gcwq->lock held. */ +static bool __need_more_worker(struct global_cwq *gcwq) +{ + return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) || + gcwq->flags & GCWQ_HIGHPRI_PENDING; +} + /* * Need to wake up a worker? Called from anything but currently * running workers. */ static bool need_more_worker(struct global_cwq *gcwq) { - atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); - - return !list_empty(&gcwq->worklist) && !atomic_read(nr_running); + return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq); } /* Can I start working? Called from busy but !running workers. 
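/*
 * A usage sketch for the utility APIs above (not part of the patch;
 * dbg_report(), my_wq and my_work are hypothetical names).  The results
 * are advisory only -- none of these calls imply any synchronization.
 */
static void dbg_report(struct workqueue_struct *my_wq,
		       struct work_struct *my_work)
{
	unsigned int busy = work_busy(my_work);

	if (busy & WORK_BUSY_PENDING)
		pr_debug("work pending, last cpu %u\n", work_cpu(my_work));
	if (busy & WORK_BUSY_RUNNING)
		pr_debug("work currently running\n");
	if (workqueue_congested(raw_smp_processor_id(), my_wq))
		pr_debug("local cwq is congested\n");
}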
*/ @@ -734,6 +739,43 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq, } /** + * gcwq_determine_ins_pos - find insertion position + * @gcwq: gcwq of interest + * @cwq: cwq a work is being queued for + * + * A work for @cwq is about to be queued on @gcwq, determine insertion + * position for the work. If @cwq is for HIGHPRI wq, the work is + * queued at the head of the queue but in FIFO order with respect to + * other HIGHPRI works; otherwise, at the end of the queue. This + * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that + * there are HIGHPRI works pending. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + * + * RETURNS: + * Pointer to inserstion position. + */ +static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq, + struct cpu_workqueue_struct *cwq) +{ + struct work_struct *twork; + + if (likely(!(cwq->wq->flags & WQ_HIGHPRI))) + return &gcwq->worklist; + + list_for_each_entry(twork, &gcwq->worklist, entry) { + struct cpu_workqueue_struct *tcwq = get_work_cwq(twork); + + if (!(tcwq->wq->flags & WQ_HIGHPRI)) + break; + } + + gcwq->flags |= GCWQ_HIGHPRI_PENDING; + return &twork->entry; +} + +/** * insert_work - insert a work into gcwq * @cwq: cwq @work belongs to * @work: work to insert @@ -770,7 +812,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, */ smp_mb(); - if (!atomic_read(get_gcwq_nr_running(gcwq->cpu))) + if (__need_more_worker(gcwq)) wake_up_worker(gcwq); } @@ -887,7 +929,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, if (likely(cwq->nr_active < cwq->max_active)) { cwq->nr_active++; - worklist = &gcwq->worklist; + worklist = gcwq_determine_ins_pos(gcwq, cwq); } else worklist = &cwq->delayed_works; @@ -1526,8 +1568,9 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) { struct work_struct *work = list_first_entry(&cwq->delayed_works, struct work_struct, entry); + struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); - move_linked_works(work, &cwq->gcwq->worklist, NULL); + move_linked_works(work, pos, NULL); cwq->nr_active++; } @@ -1634,6 +1677,21 @@ static void process_one_work(struct worker *worker, struct work_struct *work) set_work_cpu(work, gcwq->cpu); list_del_init(&work->entry); + /* + * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI, + * wake up another worker; otherwise, clear HIGHPRI_PENDING. + */ + if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) { + struct work_struct *nwork = list_first_entry(&gcwq->worklist, + struct work_struct, entry); + + if (!list_empty(&gcwq->worklist) && + get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI) + wake_up_worker(gcwq); + else + gcwq->flags &= ~GCWQ_HIGHPRI_PENDING; + } + spin_unlock_irq(&gcwq->lock); work_clear_pending(work); -- cgit v1.1 From fb0e7beb5c1b6fb4da786ba709d7138373d5fb22 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:15 +0200 Subject: workqueue: implement cpu intensive workqueue This patch implements cpu intensive workqueue which can be specified with WQ_CPU_INTENSIVE flag on creation. Works queued to a cpu intensive workqueue don't participate in concurrency management. IOW, it doesn't contribute to gcwq->nr_running and thus doesn't delay excution of other works. Note that although cpu intensive works won't delay other works, they can be delayed by other works. Combine with WQ_HIGHPRI to avoid being delayed by other works too. As the name suggests this is useful when using workqueue for cpu intensive works. 
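For example, a caller wanting both properties might set the workqueue up as sketched below (crypto_wq and the init function are hypothetical names, not taken from these patches):

static struct workqueue_struct *crypto_wq;

static int __init crypto_wq_init(void)
{
	/*
	 * WQ_HIGHPRI: works go to the head of the gcwq worklist;
	 * WQ_CPU_INTENSIVE: their execution doesn't delay other works.
	 */
	crypto_wq = alloc_workqueue("crypto", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 0);
	return crypto_wq ? 0 : -ENOMEM;
}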
Workers executing cpu intensive works are not considered for workqueue concurrency management and left for the scheduler to manage. Signed-off-by: Tejun Heo Cc: Andrew Morton --- kernel/workqueue.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 5775717..6fa847c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -52,8 +52,10 @@ enum { WORKER_PREP = 1 << 3, /* preparing to run works */ WORKER_ROGUE = 1 << 4, /* not bound to any cpu */ WORKER_REBIND = 1 << 5, /* mom is home, come back */ + WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ - WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND, + WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND | + WORKER_CPU_INTENSIVE, /* gcwq->trustee_state */ TRUSTEE_START = 0, /* start */ @@ -1641,6 +1643,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) struct cpu_workqueue_struct *cwq = get_work_cwq(work); struct global_cwq *gcwq = cwq->gcwq; struct hlist_head *bwh = busy_worker_head(gcwq, work); + bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; work_func_t f = work->func; int work_color; struct worker *collision; @@ -1692,6 +1695,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work) gcwq->flags &= ~GCWQ_HIGHPRI_PENDING; } + /* + * CPU intensive works don't participate in concurrency + * management. They're the scheduler's responsibility. + */ + if (unlikely(cpu_intensive)) + worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); + spin_unlock_irq(&gcwq->lock); work_clear_pending(work); @@ -1713,6 +1723,10 @@ static void process_one_work(struct worker *worker, struct work_struct *work) spin_lock_irq(&gcwq->lock); + /* clear cpu intensive status */ + if (unlikely(cpu_intensive)) + worker_clr_flags(worker, WORKER_CPU_INTENSIVE); + /* we're done with it, release */ hlist_del_init(&worker->hentry); worker->current_work = NULL; -- cgit v1.1 From 2ec57d448b2e8fcfba539a46701b43f14f037f17 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 29 Jun 2010 12:02:01 +1000 Subject: sched: Fix spelling of sibling No logic changes, only spelling. Signed-off-by: Michael Neuling Cc: linuxppc-dev@ozlabs.org Cc: David Howells Cc: Peter Zijlstra LKML-Reference: <15249.1277776921@neuling.org> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 5e8f98c..b4da534 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2567,7 +2567,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, } while (sg != sd->groups); } -int __weak arch_sd_sibiling_asym_packing(void) +int __weak arch_sd_sibling_asym_packing(void) { return 0*SD_ASYM_PACKING; } -- cgit v1.1 From cb444766996395d4370bcc17ec895dd4e13ceb72 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:50 +0200 Subject: workqueue: use worker_set/clr_flags() only from worker itself worker_set/clr_flags() assume that if none of NOT_RUNNING flags is set the worker must be contributing to nr_running which is only true if the worker is actually running. As when called from self, it is guaranteed that the worker is running, those functions can be safely used from the worker itself and they aren't necessary from other places anyway. Make the following changes to fix the bug. * Make worker_set/clr_flags() whine if not called from self. 
* Convert all places which called those functions from other tasks to manipulate flags directly. * Make trustee_thread() directly clear nr_running after setting WORKER_ROGUE on all workers. This is the only place where nr_running manipulation is necessary outside of workers themselves. * While at it, add sanity check for nr_running in worker_enter_idle(). Signed-off-by: Tejun Heo --- kernel/workqueue.c | 47 ++++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 6fa847c..5587338 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -601,7 +601,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, /** * worker_set_flags - set worker flags and adjust nr_running accordingly - * @worker: worker to set flags for + * @worker: self * @flags: flags to set * @wakeup: wakeup an idle worker if necessary * @@ -609,14 +609,16 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, * nr_running becomes zero and @wakeup is %true, an idle worker is * woken up. * - * LOCKING: - * spin_lock_irq(gcwq->lock). + * CONTEXT: + * spin_lock_irq(gcwq->lock) */ static inline void worker_set_flags(struct worker *worker, unsigned int flags, bool wakeup) { struct global_cwq *gcwq = worker->gcwq; + WARN_ON_ONCE(worker->task != current); + /* * If transitioning into NOT_RUNNING, adjust nr_running and * wake up an idle worker as necessary if requested by @@ -639,19 +641,21 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags, /** * worker_clr_flags - clear worker flags and adjust nr_running accordingly - * @worker: worker to set flags for + * @worker: self * @flags: flags to clear * * Clear @flags in @worker->flags and adjust nr_running accordingly. * - * LOCKING: - * spin_lock_irq(gcwq->lock). 
+ * CONTEXT: + * spin_lock_irq(gcwq->lock) */ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) { struct global_cwq *gcwq = worker->gcwq; unsigned int oflags = worker->flags; + WARN_ON_ONCE(worker->task != current); + worker->flags &= ~flags; /* if transitioning out of NOT_RUNNING, increment nr_running */ @@ -1073,7 +1077,8 @@ static void worker_enter_idle(struct worker *worker) BUG_ON(!list_empty(&worker->entry) && (worker->hentry.next || worker->hentry.pprev)); - worker_set_flags(worker, WORKER_IDLE, false); + /* can't use worker_set_flags(), also called from start_worker() */ + worker->flags |= WORKER_IDLE; gcwq->nr_idle++; worker->last_active = jiffies; @@ -1086,6 +1091,10 @@ static void worker_enter_idle(struct worker *worker) jiffies + IDLE_WORKER_TIMEOUT); } else wake_up_all(&gcwq->trustee_wait); + + /* sanity check nr_running */ + WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle && + atomic_read(get_gcwq_nr_running(gcwq->cpu))); } /** @@ -1270,7 +1279,7 @@ fail: */ static void start_worker(struct worker *worker) { - worker_set_flags(worker, WORKER_STARTED, false); + worker->flags |= WORKER_STARTED; worker->gcwq->nr_workers++; worker_enter_idle(worker); wake_up_process(worker->task); @@ -1300,7 +1309,7 @@ static void destroy_worker(struct worker *worker) gcwq->nr_idle--; list_del_init(&worker->entry); - worker_set_flags(worker, WORKER_DIE, false); + worker->flags |= WORKER_DIE; spin_unlock_irq(&gcwq->lock); @@ -2979,10 +2988,10 @@ static int __cpuinit trustee_thread(void *__gcwq) gcwq->flags |= GCWQ_MANAGING_WORKERS; list_for_each_entry(worker, &gcwq->idle_list, entry) - worker_set_flags(worker, WORKER_ROGUE, false); + worker->flags |= WORKER_ROGUE; for_each_busy_worker(worker, i, pos, gcwq) - worker_set_flags(worker, WORKER_ROGUE, false); + worker->flags |= WORKER_ROGUE; /* * Call schedule() so that we cross rq->lock and thus can @@ -2995,12 +3004,12 @@ static int __cpuinit trustee_thread(void *__gcwq) spin_lock_irq(&gcwq->lock); /* - * Sched callbacks are disabled now. gcwq->nr_running should - * be zero and will stay that way, making need_more_worker() - * and keep_working() always return true as long as the - * worklist is not empty. + * Sched callbacks are disabled now. Zap nr_running. After + * this, nr_running stays zero and need_more_worker() and + * keep_working() are always true as long as the worklist is + * not empty. */ - WARN_ON_ONCE(atomic_read(get_gcwq_nr_running(gcwq->cpu)) != 0); + atomic_set(get_gcwq_nr_running(gcwq->cpu), 0); spin_unlock_irq(&gcwq->lock); del_timer_sync(&gcwq->idle_timer); @@ -3046,7 +3055,7 @@ static int __cpuinit trustee_thread(void *__gcwq) worker = create_worker(gcwq, false); spin_lock_irq(&gcwq->lock); if (worker) { - worker_set_flags(worker, WORKER_ROGUE, false); + worker->flags |= WORKER_ROGUE; start_worker(worker); } } @@ -3085,8 +3094,8 @@ static int __cpuinit trustee_thread(void *__gcwq) * operations. Use a separate flag to mark that * rebinding is scheduled. 
*/ - worker_set_flags(worker, WORKER_REBIND, false); - worker_clr_flags(worker, WORKER_ROGUE); + worker->flags |= WORKER_REBIND; + worker->flags &= ~WORKER_ROGUE; /* queue rebind_work, wq doesn't matter, use the default one */ if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, -- cgit v1.1 From 4ce48b37bfedc2bc11e61eae76784887e88b922c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: fix race condition in flush_workqueue() When one flusher is cascading to the next flusher, it first sets wq->first_flusher to the next one and sets up the next flush cycle. If there's nothing to do for the next cycle, it clears wq->flush_flusher and proceeds to the one after that. If the woken up flusher checks wq->first_flusher before it gets cleared, it will incorrectly assume the role of the first flusher, which triggers BUG_ON() sanity check. Fix it by checking wq->first_flusher again after grabbing the mutex. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 5587338..b59c946 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2138,6 +2138,10 @@ void flush_workqueue(struct workqueue_struct *wq) mutex_lock(&wq->flush_mutex); + /* we might have raced, check again with mutex held */ + if (wq->first_flusher != &this_flusher) + goto out_unlock; + wq->first_flusher = NULL; BUG_ON(!list_empty(&this_flusher.list)); -- cgit v1.1 From a1e453d2799760ecf2e09ecd45b80edbe7ff540e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: fix incorrect cpu number BUG_ON() in get_work_gcwq() get_work_gcwq() was incorrectly triggering BUG_ON() if cpu number is equal to or higher than num_possible_cpus() instead of nr_cpu_ids. Fix it. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b59c946..0c485a5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -445,7 +445,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) if (cpu == NR_CPUS) return NULL; - BUG_ON(cpu >= num_possible_cpus()); + BUG_ON(cpu >= nr_cpu_ids); return get_gcwq(cpu); } -- cgit v1.1 From d313dd85ad846bc768d58e9ceb28588f917f4c9a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: fix worker management invocation without pending works When there's no pending work to do, worker_thread() goes back to sleep after waking up without checking whether worker management is necessary. This means that idle worker exit requests can be ignored if the gcwq stays empty. Fix it by making worker_thread() always check whether worker management is necessary before going to sleep. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0c485a5..2eb9fbd 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1832,10 +1832,10 @@ recheck: } while (keep_working(gcwq)); worker_set_flags(worker, WORKER_PREP, false); - +sleep: if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker)) goto recheck; -sleep: + /* * gcwq->lock is held and there's no work to process and no * need to manage, sleep. 
Workers are woken up only while -- cgit v1.1 From bdbc5dd7de5d07d6c9d3536e598956165a031d4c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: prepare for WQ_UNBOUND implementation In preparation of WQ_UNBOUND addition, make the following changes. * Add WORK_CPU_* constants for pseudo cpu id numbers used (currently only WORK_CPU_NONE) and use them instead of NR_CPUS. This is to allow another pseudo cpu id for unbound cpu. * Reorder WQ_* flags. * Make workqueue_struct->cpu_wq a union which contains a percpu pointer, regular pointer and an unsigned long value and use kzalloc/kfree() in UP allocation path. This will be used to implement unbound workqueues which will use only one cwq on SMPs. * Move alloc_cwqs() allocation after initialization of wq fields, so that alloc_cwqs() has access to wq->flags. * Trivial relocation of wq local variables in freeze functions. These changes don't cause any functional change. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 83 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 43 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2eb9fbd..a105ddf 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -190,7 +190,11 @@ struct wq_flusher { */ struct workqueue_struct { unsigned int flags; /* I: WQ_* flags */ - struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ + union { + struct cpu_workqueue_struct __percpu *pcpu; + struct cpu_workqueue_struct *single; + unsigned long v; + } cpu_wq; /* I: cwq's */ struct list_head list; /* W: list of all workqueues */ struct mutex flush_mutex; /* protects wq flushing */ @@ -362,7 +366,11 @@ static atomic_t *get_gcwq_nr_running(unsigned int cpu) static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, struct workqueue_struct *wq) { - return per_cpu_ptr(wq->cpu_wq, cpu); +#ifndef CONFIG_SMP + return wq->cpu_wq.single; +#else + return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); +#endif } static unsigned int work_color_to_flags(int color) @@ -442,7 +450,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) return ((struct cpu_workqueue_struct *)data)->gcwq; cpu = data >> WORK_STRUCT_FLAG_BITS; - if (cpu == NR_CPUS) + if (cpu == WORK_CPU_NONE) return NULL; BUG_ON(cpu >= nr_cpu_ids); @@ -846,7 +854,7 @@ static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq) */ if (likely(!(gcwq->flags & GCWQ_FREEZING))) { smp_wmb(); /* paired with cmpxchg() in __queue_work() */ - wq->single_cpu = NR_CPUS; + wq->single_cpu = WORK_CPU_NONE; } } @@ -904,7 +912,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, */ retry: cpu = wq->single_cpu; - arbitrate = cpu == NR_CPUS; + arbitrate = cpu == WORK_CPU_NONE; if (arbitrate) cpu = req_cpu; @@ -918,7 +926,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, * visible on the new cpu after this point. */ if (arbitrate) - cmpxchg(&wq->single_cpu, NR_CPUS, cpu); + cmpxchg(&wq->single_cpu, WORK_CPU_NONE, cpu); if (unlikely(wq->single_cpu != cpu)) { spin_unlock_irqrestore(&gcwq->lock, flags); @@ -2572,7 +2580,7 @@ int keventd_up(void) return system_wq != NULL; } -static struct cpu_workqueue_struct *alloc_cwqs(void) +static int alloc_cwqs(struct workqueue_struct *wq) { /* * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. 
@@ -2582,40 +2590,36 @@ static struct cpu_workqueue_struct *alloc_cwqs(void) const size_t size = sizeof(struct cpu_workqueue_struct); const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, __alignof__(unsigned long long)); - struct cpu_workqueue_struct *cwqs; #ifndef CONFIG_SMP void *ptr; /* - * On UP, percpu allocator doesn't honor alignment parameter - * and simply uses arch-dependent default. Allocate enough - * room to align cwq and put an extra pointer at the end - * pointing back to the originally allocated pointer which - * will be used for free. - * - * FIXME: This really belongs to UP percpu code. Update UP - * percpu code to honor alignment and remove this ugliness. + * Allocate enough room to align cwq and put an extra pointer + * at the end pointing back to the originally allocated + * pointer which will be used for free. */ - ptr = __alloc_percpu(size + align + sizeof(void *), 1); - cwqs = PTR_ALIGN(ptr, align); - *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr; + ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); + if (ptr) { + wq->cpu_wq.single = PTR_ALIGN(ptr, align); + *(void **)(wq->cpu_wq.single + 1) = ptr; + } #else - /* On SMP, percpu allocator can do it itself */ - cwqs = __alloc_percpu(size, align); + /* On SMP, percpu allocator can align itself */ + wq->cpu_wq.pcpu = __alloc_percpu(size, align); #endif /* just in case, make sure it's actually aligned */ - BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align)); - return cwqs; + BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); + return wq->cpu_wq.v ? 0 : -ENOMEM; } -static void free_cwqs(struct cpu_workqueue_struct *cwqs) +static void free_cwqs(struct workqueue_struct *wq) { #ifndef CONFIG_SMP /* on UP, the pointer to free is stored right after the cwq */ - if (cwqs) - free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0)); + if (wq->cpu_wq.single) + kfree(*(void **)(wq->cpu_wq.single + 1)); #else - free_percpu(cwqs); + free_percpu(wq->cpu_wq.pcpu); #endif } @@ -2645,22 +2649,21 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, if (!wq) goto err; - wq->cpu_wq = alloc_cwqs(); - if (!wq->cpu_wq) - goto err; - wq->flags = flags; wq->saved_max_active = max_active; mutex_init(&wq->flush_mutex); atomic_set(&wq->nr_cwqs_to_flush, 0); INIT_LIST_HEAD(&wq->flusher_queue); INIT_LIST_HEAD(&wq->flusher_overflow); - wq->single_cpu = NR_CPUS; + wq->single_cpu = WORK_CPU_NONE; wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); + if (alloc_cwqs(wq) < 0) + goto err; + for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct global_cwq *gcwq = get_gcwq(cpu); @@ -2710,7 +2713,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, return wq; err: if (wq) { - free_cwqs(wq->cpu_wq); + free_cwqs(wq); free_cpumask_var(wq->mayday_mask); kfree(wq->rescuer); kfree(wq); @@ -2755,7 +2758,7 @@ void destroy_workqueue(struct workqueue_struct *wq) free_cpumask_var(wq->mayday_mask); } - free_cwqs(wq->cpu_wq); + free_cwqs(wq); kfree(wq); } EXPORT_SYMBOL_GPL(destroy_workqueue); @@ -2821,13 +2824,13 @@ EXPORT_SYMBOL_GPL(workqueue_congested); * @work: the work of interest * * RETURNS: - * CPU number if @work was ever queued. NR_CPUS otherwise. + * CPU number if @work was ever queued. WORK_CPU_NONE otherwise. */ unsigned int work_cpu(struct work_struct *work) { struct global_cwq *gcwq = get_work_gcwq(work); - return gcwq ? gcwq->cpu : NR_CPUS; + return gcwq ? 
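/*
 * The UP branch above kzalloc()s extra room so the cwq can be aligned
 * by hand.  A generic sketch of the same trick (hypothetical helpers,
 * not part of the patch): over-allocate, align the object, and stash
 * the original pointer right behind it for the free path.
 */
static void *alloc_aligned(size_t size, size_t align)
{
	void *ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
	void *obj;

	if (!ptr)
		return NULL;
	obj = PTR_ALIGN(ptr, align);
	*(void **)((char *)obj + size) = ptr;	/* remember what to kfree */
	return obj;
}

static void free_aligned(void *obj, size_t size)
{
	if (obj)
		kfree(*(void **)((char *)obj + size));
}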
gcwq->cpu : WORK_CPU_NONE; } EXPORT_SYMBOL_GPL(work_cpu); @@ -3300,7 +3303,6 @@ EXPORT_SYMBOL_GPL(work_on_cpu); */ void freeze_workqueues_begin(void) { - struct workqueue_struct *wq; unsigned int cpu; spin_lock(&workqueue_lock); @@ -3310,6 +3312,7 @@ void freeze_workqueues_begin(void) for_each_possible_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); + struct workqueue_struct *wq; spin_lock_irq(&gcwq->lock); @@ -3344,7 +3347,6 @@ void freeze_workqueues_begin(void) */ bool freeze_workqueues_busy(void) { - struct workqueue_struct *wq; unsigned int cpu; bool busy = false; @@ -3353,6 +3355,7 @@ bool freeze_workqueues_busy(void) BUG_ON(!workqueue_freezing); for_each_possible_cpu(cpu) { + struct workqueue_struct *wq; /* * nr_active is monotonically decreasing. It's safe * to peek without lock. @@ -3386,7 +3389,6 @@ out_unlock: */ void thaw_workqueues(void) { - struct workqueue_struct *wq; unsigned int cpu; spin_lock(&workqueue_lock); @@ -3396,6 +3398,7 @@ void thaw_workqueues(void) for_each_possible_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); + struct workqueue_struct *wq; spin_lock_irq(&gcwq->lock); @@ -3443,7 +3446,7 @@ void __init init_workqueues(void) * sure cpu number won't overflow into kernel pointer area so * that they can be distinguished. */ - BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET); + BUILD_BUG_ON(WORK_CPU_LAST << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET); hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); -- cgit v1.1 From f34217977d717385a3e9fd7018ac39fade3964c0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: implement unbound workqueue This patch implements unbound workqueue which can be specified with WQ_UNBOUND flag on creation. An unbound workqueue has the following properties. * It uses a dedicated gcwq with a pseudo CPU number WORK_CPU_UNBOUND. This gcwq is always online and disassociated. * Workers are not bound to any CPU and not concurrency managed. Works are dispatched to workers as soon as possible and the only applied limitation is @max_active. IOW, all unbound workqeueues are implicitly high priority. Unbound workqueues can be used as simple execution context provider. Contexts unbound to any cpu are served as soon as possible. Signed-off-by: Tejun Heo Cc: Arjan van de Ven Cc: David Howells --- kernel/workqueue.c | 218 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 159 insertions(+), 59 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a105ddf..4608563 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -53,9 +53,10 @@ enum { WORKER_ROGUE = 1 << 4, /* not bound to any cpu */ WORKER_REBIND = 1 << 5, /* mom is home, come back */ WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ + WORKER_UNBOUND = 1 << 7, /* worker is unbound */ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND | - WORKER_CPU_INTENSIVE, + WORKER_CPU_INTENSIVE | WORKER_UNBOUND, /* gcwq->trustee_state */ TRUSTEE_START = 0, /* start */ @@ -96,7 +97,7 @@ enum { * X: During normal operation, modification requires gcwq->lock and * should be done only from local cpu. Either disabling preemption * on local cpu or grabbing gcwq->lock is enough for read access. - * While trustee is in charge, it's identical to L. + * If GCWQ_DISASSOCIATED is set, it's identical to L. * * F: wq->flush_mutex protected. 
* @@ -220,14 +221,52 @@ struct workqueue_struct { struct workqueue_struct *system_wq __read_mostly; struct workqueue_struct *system_long_wq __read_mostly; struct workqueue_struct *system_nrt_wq __read_mostly; +struct workqueue_struct *system_unbound_wq __read_mostly; EXPORT_SYMBOL_GPL(system_wq); EXPORT_SYMBOL_GPL(system_long_wq); EXPORT_SYMBOL_GPL(system_nrt_wq); +EXPORT_SYMBOL_GPL(system_unbound_wq); #define for_each_busy_worker(worker, i, pos, gcwq) \ for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) +static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask, + unsigned int sw) +{ + if (cpu < nr_cpu_ids) { + if (sw & 1) { + cpu = cpumask_next(cpu, mask); + if (cpu < nr_cpu_ids) + return cpu; + } + if (sw & 2) + return WORK_CPU_UNBOUND; + } + return WORK_CPU_NONE; +} + +static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, + struct workqueue_struct *wq) +{ + return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2); +} + +#define for_each_gcwq_cpu(cpu) \ + for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \ + (cpu) < WORK_CPU_NONE; \ + (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3)) + +#define for_each_online_gcwq_cpu(cpu) \ + for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \ + (cpu) < WORK_CPU_NONE; \ + (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3)) + +#define for_each_cwq_cpu(cpu, wq) \ + for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \ + (cpu) < WORK_CPU_NONE; \ + (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq))) + #ifdef CONFIG_DEBUG_OBJECTS_WORK static struct debug_obj_descr work_debug_descr; @@ -351,26 +390,46 @@ static bool workqueue_freezing; /* W: have wqs started freezing? */ static DEFINE_PER_CPU(struct global_cwq, global_cwq); static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running); +/* + * Global cpu workqueue and nr_running counter for unbound gcwq. The + * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its + * workers have WORKER_UNBOUND set. 
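/*
 * Illustrative sketch (not from the patch): the iterators above visit
 * every possible CPU plus the pseudo CPU WORK_CPU_UNBOUND that backs
 * the unbound gcwq, so a walk over all gcwqs looks like this.
 */
static void walk_gcwqs(void)
{
	unsigned int cpu;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		if (cpu == WORK_CPU_UNBOUND)
			pr_debug("unbound gcwq %p\n", gcwq);
		else
			pr_debug("gcwq for cpu %u\n", cpu);
	}
}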
+ */ +static struct global_cwq unbound_global_cwq; +static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0); /* always 0 */ + static int worker_thread(void *__worker); static struct global_cwq *get_gcwq(unsigned int cpu) { - return &per_cpu(global_cwq, cpu); + if (cpu != WORK_CPU_UNBOUND) + return &per_cpu(global_cwq, cpu); + else + return &unbound_global_cwq; } static atomic_t *get_gcwq_nr_running(unsigned int cpu) { - return &per_cpu(gcwq_nr_running, cpu); + if (cpu != WORK_CPU_UNBOUND) + return &per_cpu(gcwq_nr_running, cpu); + else + return &unbound_gcwq_nr_running; } static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, struct workqueue_struct *wq) { -#ifndef CONFIG_SMP - return wq->cpu_wq.single; + if (!(wq->flags & WQ_UNBOUND)) { + if (likely(cpu < nr_cpu_ids)) { +#ifdef CONFIG_SMP + return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); #else - return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); + return wq->cpu_wq.single; #endif + } + } else if (likely(cpu == WORK_CPU_UNBOUND)) + return wq->cpu_wq.single; + return NULL; } static unsigned int work_color_to_flags(int color) @@ -453,7 +512,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) if (cpu == WORK_CPU_NONE) return NULL; - BUG_ON(cpu >= nr_cpu_ids); + BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND); return get_gcwq(cpu); } @@ -869,11 +928,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, debug_work_activate(work); + if (unlikely(cpu == WORK_CPU_UNBOUND)) + cpu = raw_smp_processor_id(); + /* * Determine gcwq to use. SINGLE_CPU is inherently * NON_REENTRANT, so test it first. */ - if (!(wq->flags & WQ_SINGLE_CPU)) { + if (!(wq->flags & (WQ_SINGLE_CPU | WQ_UNBOUND))) { struct global_cwq *last_gcwq; /* @@ -900,7 +962,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, } } else spin_lock_irqsave(&gcwq->lock, flags); - } else { + } else if (!(wq->flags & WQ_UNBOUND)) { unsigned int req_cpu = cpu; /* @@ -932,6 +994,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, spin_unlock_irqrestore(&gcwq->lock, flags); goto retry; } + } else { + gcwq = get_gcwq(WORK_CPU_UNBOUND); + spin_lock_irqsave(&gcwq->lock, flags); } /* gcwq determined, get cwq and queue */ @@ -1166,7 +1231,8 @@ static bool worker_maybe_bind_and_lock(struct worker *worker) * it races with cpu hotunplug operation. Verify * against GCWQ_DISASSOCIATED. */ - set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu)); + if (!(gcwq->flags & GCWQ_DISASSOCIATED)) + set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu)); spin_lock_irq(&gcwq->lock); if (gcwq->flags & GCWQ_DISASSOCIATED) @@ -1231,8 +1297,9 @@ static struct worker *alloc_worker(void) */ static struct worker *create_worker(struct global_cwq *gcwq, bool bind) { - int id = -1; + bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND; struct worker *worker = NULL; + int id = -1; spin_lock_irq(&gcwq->lock); while (ida_get_new(&gcwq->worker_ida, &id)) { @@ -1250,8 +1317,12 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind) worker->gcwq = gcwq; worker->id = id; - worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d", - gcwq->cpu, id); + if (!on_unbound_cpu) + worker->task = kthread_create(worker_thread, worker, + "kworker/%u:%d", gcwq->cpu, id); + else + worker->task = kthread_create(worker_thread, worker, + "kworker/u:%d", id); if (IS_ERR(worker->task)) goto fail; @@ -1260,10 +1331,13 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind) * online later on. 
Make sure every worker has * PF_THREAD_BOUND set. */ - if (bind) + if (bind && !on_unbound_cpu) kthread_bind(worker->task, gcwq->cpu); - else + else { worker->task->flags |= PF_THREAD_BOUND; + if (on_unbound_cpu) + worker->flags |= WORKER_UNBOUND; + } return worker; fail: @@ -1358,12 +1432,17 @@ static bool send_mayday(struct work_struct *work) { struct cpu_workqueue_struct *cwq = get_work_cwq(work); struct workqueue_struct *wq = cwq->wq; + unsigned int cpu; if (!(wq->flags & WQ_RESCUER)) return false; /* mayday mayday mayday */ - if (!cpumask_test_and_set_cpu(cwq->gcwq->cpu, wq->mayday_mask)) + cpu = cwq->gcwq->cpu; + /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ + if (cpu == WORK_CPU_UNBOUND) + cpu = 0; + if (!cpumask_test_and_set_cpu(cpu, wq->mayday_mask)) wake_up_process(wq->rescuer->task); return true; } @@ -1882,6 +1961,7 @@ static int rescuer_thread(void *__wq) struct workqueue_struct *wq = __wq; struct worker *rescuer = wq->rescuer; struct list_head *scheduled = &rescuer->scheduled; + bool is_unbound = wq->flags & WQ_UNBOUND; unsigned int cpu; set_user_nice(current, RESCUER_NICE_LEVEL); @@ -1891,8 +1971,13 @@ repeat: if (kthread_should_stop()) return 0; + /* + * See whether any cpu is asking for help. Unbounded + * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. + */ for_each_cpu(cpu, wq->mayday_mask) { - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; + struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); struct global_cwq *gcwq = cwq->gcwq; struct work_struct *work, *n; @@ -2034,7 +2119,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, atomic_set(&wq->nr_cwqs_to_flush, 1); } - for_each_possible_cpu(cpu) { + for_each_cwq_cpu(cpu, wq) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct global_cwq *gcwq = cwq->gcwq; @@ -2344,7 +2429,7 @@ static void wait_on_work(struct work_struct *work) lock_map_acquire(&work->lockdep_map); lock_map_release(&work->lockdep_map); - for_each_possible_cpu(cpu) + for_each_gcwq_cpu(cpu) wait_on_cpu_work(get_gcwq(cpu), work); } @@ -2590,23 +2675,25 @@ static int alloc_cwqs(struct workqueue_struct *wq) const size_t size = sizeof(struct cpu_workqueue_struct); const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, __alignof__(unsigned long long)); -#ifndef CONFIG_SMP - void *ptr; - /* - * Allocate enough room to align cwq and put an extra pointer - * at the end pointing back to the originally allocated - * pointer which will be used for free. - */ - ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); - if (ptr) { - wq->cpu_wq.single = PTR_ALIGN(ptr, align); - *(void **)(wq->cpu_wq.single + 1) = ptr; + if (CONFIG_SMP && !(wq->flags & WQ_UNBOUND)) { + /* on SMP, percpu allocator can align itself */ + wq->cpu_wq.pcpu = __alloc_percpu(size, align); + } else { + void *ptr; + + /* + * Allocate enough room to align cwq and put an extra + * pointer at the end pointing back to the originally + * allocated pointer which will be used for free. + */ + ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); + if (ptr) { + wq->cpu_wq.single = PTR_ALIGN(ptr, align); + *(void **)(wq->cpu_wq.single + 1) = ptr; + } } -#else - /* On SMP, percpu allocator can align itself */ - wq->cpu_wq.pcpu = __alloc_percpu(size, align); -#endif + /* just in case, make sure it's actually aligned */ BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); return wq->cpu_wq.v ? 
0 : -ENOMEM; @@ -2614,23 +2701,25 @@ static int alloc_cwqs(struct workqueue_struct *wq) static void free_cwqs(struct workqueue_struct *wq) { -#ifndef CONFIG_SMP - /* on UP, the pointer to free is stored right after the cwq */ - if (wq->cpu_wq.single) + if (CONFIG_SMP && !(wq->flags & WQ_UNBOUND)) + free_percpu(wq->cpu_wq.pcpu); + else if (wq->cpu_wq.single) { + /* the pointer to free is stored right after the cwq */ kfree(*(void **)(wq->cpu_wq.single + 1)); -#else - free_percpu(wq->cpu_wq.pcpu); -#endif + } } -static int wq_clamp_max_active(int max_active, const char *name) +static int wq_clamp_max_active(int max_active, unsigned int flags, + const char *name) { - if (max_active < 1 || max_active > WQ_MAX_ACTIVE) + int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; + + if (max_active < 1 || max_active > lim) printk(KERN_WARNING "workqueue: max_active %d requested for %s " "is out of range, clamping between %d and %d\n", - max_active, name, 1, WQ_MAX_ACTIVE); + max_active, name, 1, lim); - return clamp_val(max_active, 1, WQ_MAX_ACTIVE); + return clamp_val(max_active, 1, lim); } struct workqueue_struct *__alloc_workqueue_key(const char *name, @@ -2642,8 +2731,15 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, struct workqueue_struct *wq; unsigned int cpu; + /* + * Unbound workqueues aren't concurrency managed and should be + * dispatched to workers immediately. + */ + if (flags & WQ_UNBOUND) + flags |= WQ_HIGHPRI; + max_active = max_active ?: WQ_DFL_ACTIVE; - max_active = wq_clamp_max_active(max_active, name); + max_active = wq_clamp_max_active(max_active, flags, name); wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) @@ -2664,7 +2760,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, if (alloc_cwqs(wq) < 0) goto err; - for_each_possible_cpu(cpu) { + for_each_cwq_cpu(cpu, wq) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct global_cwq *gcwq = get_gcwq(cpu); @@ -2703,7 +2799,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, spin_lock(&workqueue_lock); if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) - for_each_possible_cpu(cpu) + for_each_cwq_cpu(cpu, wq) get_cwq(cpu, wq)->max_active = 0; list_add(&wq->list, &workqueues); @@ -2743,7 +2839,7 @@ void destroy_workqueue(struct workqueue_struct *wq) spin_unlock(&workqueue_lock); /* sanity check */ - for_each_possible_cpu(cpu) { + for_each_cwq_cpu(cpu, wq) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); int i; @@ -2777,13 +2873,13 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) { unsigned int cpu; - max_active = wq_clamp_max_active(max_active, wq->name); + max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); spin_lock(&workqueue_lock); wq->saved_max_active = max_active; - for_each_possible_cpu(cpu) { + for_each_cwq_cpu(cpu, wq) { struct global_cwq *gcwq = get_gcwq(cpu); spin_lock_irq(&gcwq->lock); @@ -3310,7 +3406,7 @@ void freeze_workqueues_begin(void) BUG_ON(workqueue_freezing); workqueue_freezing = true; - for_each_possible_cpu(cpu) { + for_each_gcwq_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); struct workqueue_struct *wq; @@ -3322,7 +3418,7 @@ void freeze_workqueues_begin(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (wq->flags & WQ_FREEZEABLE) + if (cwq && wq->flags & WQ_FREEZEABLE) cwq->max_active = 0; } @@ -3354,7 +3450,7 @@ bool freeze_workqueues_busy(void) BUG_ON(!workqueue_freezing); - for_each_possible_cpu(cpu) { + 
for_each_gcwq_cpu(cpu) { struct workqueue_struct *wq; /* * nr_active is monotonically decreasing. It's safe @@ -3363,7 +3459,7 @@ bool freeze_workqueues_busy(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (!(wq->flags & WQ_FREEZEABLE)) + if (!cwq || !(wq->flags & WQ_FREEZEABLE)) continue; BUG_ON(cwq->nr_active < 0); @@ -3396,7 +3492,7 @@ void thaw_workqueues(void) if (!workqueue_freezing) goto out_unlock; - for_each_possible_cpu(cpu) { + for_each_gcwq_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); struct workqueue_struct *wq; @@ -3408,7 +3504,7 @@ void thaw_workqueues(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (!(wq->flags & WQ_FREEZEABLE)) + if (!cwq || !(wq->flags & WQ_FREEZEABLE)) continue; /* restore max_active and repopulate worklist */ @@ -3451,12 +3547,14 @@ void __init init_workqueues(void) hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ - for_each_possible_cpu(cpu) { + for_each_gcwq_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); spin_lock_init(&gcwq->lock); INIT_LIST_HEAD(&gcwq->worklist); gcwq->cpu = cpu; + if (cpu == WORK_CPU_UNBOUND) + gcwq->flags |= GCWQ_DISASSOCIATED; INIT_LIST_HEAD(&gcwq->idle_list); for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) @@ -3476,7 +3574,7 @@ void __init init_workqueues(void) } /* create the initial worker */ - for_each_online_cpu(cpu) { + for_each_online_gcwq_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); struct worker *worker; @@ -3490,5 +3588,7 @@ void __init init_workqueues(void) system_wq = alloc_workqueue("events", 0, 0); system_long_wq = alloc_workqueue("events_long", 0, 0); system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); + system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, + WQ_UNBOUND_MAX_ACTIVE); BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq); } -- cgit v1.1 From c7fc77f78f16d138ca997ce096a62f46e2e9420a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead WQ_SINGLE_CPU combined with @max_active of 1 is used to achieve full ordering among works queued to a workqueue. The same can be achieved using WQ_UNBOUND as unbound workqueues always use the gcwq for WORK_CPU_UNBOUND. As @max_active is always one and benefits from cpu locality isn't accessible anyway, serving them with unbound workqueues should be fine. Drop WQ_SINGLE_CPU support and use WQ_UNBOUND instead. Note that most single thread workqueue users will be converted to use multithread or non-reentrant instead and only the ones which require strict ordering will keep using WQ_UNBOUND + @max_active of 1. 
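As an illustration of the two usages described above (io_wq, io_wq_init and io_kick are hypothetical names, not taken from these patches):

static struct workqueue_struct *io_wq;

static int __init io_wq_init(void)
{
	/* strict ordering: one unbound gcwq, at most one work active */
	io_wq = alloc_workqueue("io_ordered", WQ_UNBOUND, 1);
	return io_wq ? 0 : -ENOMEM;
}

static void io_kick(struct work_struct *io_work)
{
	/* or, for a plain "run this somewhere soon" execution context */
	queue_work(system_unbound_wq, io_work);
}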
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 100 ++++++++++------------------------------------------- 1 file changed, 18 insertions(+), 82 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4608563..20d6237 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -206,8 +206,6 @@ struct workqueue_struct { struct list_head flusher_queue; /* F: flush waiters */ struct list_head flusher_overflow; /* F: flush overflow list */ - unsigned long single_cpu; /* cpu for single cpu wq */ - cpumask_var_t mayday_mask; /* cpus requesting rescue */ struct worker *rescuer; /* I: rescue worker */ @@ -889,34 +887,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq, wake_up_worker(gcwq); } -/** - * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing - * @cwq: cwq to unbind - * - * Try to unbind @cwq from single cpu workqueue processing. If - * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed. - * - * CONTEXT: - * spin_lock_irq(gcwq->lock). - */ -static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq) -{ - struct workqueue_struct *wq = cwq->wq; - struct global_cwq *gcwq = cwq->gcwq; - - BUG_ON(wq->single_cpu != gcwq->cpu); - /* - * Unbind from workqueue if @cwq is not frozen. If frozen, - * thaw_workqueues() will either restart processing on this - * cpu or unbind if empty. This keeps works queued while - * frozen fully ordered and flushable. - */ - if (likely(!(gcwq->flags & GCWQ_FREEZING))) { - smp_wmb(); /* paired with cmpxchg() in __queue_work() */ - wq->single_cpu = WORK_CPU_NONE; - } -} - static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { @@ -924,20 +894,16 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct cpu_workqueue_struct *cwq; struct list_head *worklist; unsigned long flags; - bool arbitrate; debug_work_activate(work); - if (unlikely(cpu == WORK_CPU_UNBOUND)) - cpu = raw_smp_processor_id(); - - /* - * Determine gcwq to use. SINGLE_CPU is inherently - * NON_REENTRANT, so test it first. - */ - if (!(wq->flags & (WQ_SINGLE_CPU | WQ_UNBOUND))) { + /* determine gcwq to use */ + if (!(wq->flags & WQ_UNBOUND)) { struct global_cwq *last_gcwq; + if (unlikely(cpu == WORK_CPU_UNBOUND)) + cpu = raw_smp_processor_id(); + /* * It's multi cpu. If @wq is non-reentrant and @work * was previously on a different cpu, it might still @@ -962,38 +928,6 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, } } else spin_lock_irqsave(&gcwq->lock, flags); - } else if (!(wq->flags & WQ_UNBOUND)) { - unsigned int req_cpu = cpu; - - /* - * It's a bit more complex for single cpu workqueues. - * We first need to determine which cpu is going to be - * used. If no cpu is currently serving this - * workqueue, arbitrate using atomic accesses to - * wq->single_cpu; otherwise, use the current one. - */ - retry: - cpu = wq->single_cpu; - arbitrate = cpu == WORK_CPU_NONE; - if (arbitrate) - cpu = req_cpu; - - gcwq = get_gcwq(cpu); - spin_lock_irqsave(&gcwq->lock, flags); - - /* - * The following cmpxchg() is a full barrier paired - * with smp_wmb() in cwq_unbind_single_cpu() and - * guarantees that all changes to wq->st_* fields are - * visible on the new cpu after this point. 
- */ - if (arbitrate) - cmpxchg(&wq->single_cpu, WORK_CPU_NONE, cpu); - - if (unlikely(wq->single_cpu != cpu)) { - spin_unlock_irqrestore(&gcwq->lock, flags); - goto retry; - } } else { gcwq = get_gcwq(WORK_CPU_UNBOUND); spin_lock_irqsave(&gcwq->lock, flags); @@ -1105,19 +1039,30 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work = &dwork->work; if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { - struct global_cwq *gcwq = get_work_gcwq(work); - unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id(); + unsigned int lcpu; BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); timer_stats_timer_set_start_info(&dwork->timer); + /* * This stores cwq for the moment, for the timer_fn. * Note that the work's gcwq is preserved to allow * reentrance detection for delayed works. */ + if (!(wq->flags & WQ_UNBOUND)) { + struct global_cwq *gcwq = get_work_gcwq(work); + + if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND) + lcpu = gcwq->cpu; + else + lcpu = raw_smp_processor_id(); + } else + lcpu = WORK_CPU_UNBOUND; + set_work_cwq(work, get_cwq(lcpu, wq), 0); + timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -1696,9 +1641,6 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) /* one down, submit a delayed one */ if (cwq->nr_active < cwq->max_active) cwq_activate_first_delayed(cwq); - } else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) { - /* this was the last work, unbind from single cpu */ - cwq_unbind_single_cpu(cwq); } /* is flush in progress and are we at the flushing tip? */ @@ -2751,7 +2693,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, atomic_set(&wq->nr_cwqs_to_flush, 0); INIT_LIST_HEAD(&wq->flusher_queue); INIT_LIST_HEAD(&wq->flusher_overflow); - wq->single_cpu = WORK_CPU_NONE; wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); @@ -3513,11 +3454,6 @@ void thaw_workqueues(void) while (!list_empty(&cwq->delayed_works) && cwq->nr_active < cwq->max_active) cwq_activate_first_delayed(cwq); - - /* perform delayed unbind from single cpu if empty */ - if (wq->single_cpu == gcwq->cpu && - !cwq->nr_active && list_empty(&cwq->delayed_works)) - cwq_unbind_single_cpu(cwq); } wake_up_worker(gcwq); -- cgit v1.1 From e09c8614b32915c16f68e039ac7040e602d73e35 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 5 Jul 2010 15:54:45 -0300 Subject: tracing/kprobes: Support "string" type Support string type tracing and printing in kprobe-tracer. This allows user to trace string data in kernel including __user data. Note that sometimes __user data may not be accessed if it is paged-out (sorry, but kprobes operation should be done in atomic, we can not wait for page-in). Commiter note: Fixed up conflicts with b7e2ece. 
Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Frederic Weisbecker LKML-Reference: <20100519195724.2885.18788.stgit@localhost6.localdomain6> Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo --- kernel/trace/trace_kprobe.c | 370 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 292 insertions(+), 78 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 3b831d8..1b79d1c 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -30,6 +30,8 @@ #include #include #include +#include +#include #include #include "trace.h" @@ -38,6 +40,7 @@ #define MAX_TRACE_ARGS 128 #define MAX_ARGSTR_LEN 63 #define MAX_EVENT_NAME_LEN 64 +#define MAX_STRING_SIZE PATH_MAX #define KPROBE_EVENT_SYSTEM "kprobes" /* Reserved field names */ @@ -58,14 +61,16 @@ const char *reserved_field_names[] = { }; /* Printing function type */ -typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *); +typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *, + void *); #define PRINT_TYPE_FUNC_NAME(type) print_type_##type #define PRINT_TYPE_FMT_NAME(type) print_type_format_##type /* Printing in basic type function template */ #define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \ static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ - const char *name, void *data)\ + const char *name, \ + void *data, void *ent)\ { \ return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\ } \ @@ -80,6 +85,49 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int) DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long) DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long) +/* data_rloc: data relative location, compatible with u32 */ +#define make_data_rloc(len, roffs) \ + (((u32)(len) << 16) | ((u32)(roffs) & 0xffff)) +#define get_rloc_len(dl) ((u32)(dl) >> 16) +#define get_rloc_offs(dl) ((u32)(dl) & 0xffff) + +static inline void *get_rloc_data(u32 *dl) +{ + return (u8 *)dl + get_rloc_offs(*dl); +} + +/* For data_loc conversion */ +static inline void *get_loc_data(u32 *dl, void *ent) +{ + return (u8 *)ent + get_rloc_offs(*dl); +} + +/* + * Convert data_rloc to data_loc: + * data_rloc stores the offset from data_rloc itself, but data_loc + * stores the offset from event entry. + */ +#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs)) + +/* For defining macros, define string/string_size types */ +typedef u32 string; +typedef u32 string_size; + +/* Print type function for string type */ +static __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, + const char *name, + void *data, void *ent) +{ + int len = *(u32 *)data >> 16; + + if (!len) + return trace_seq_printf(s, " %s=(fault)", name); + else + return trace_seq_printf(s, " %s=\"%s\"", name, + (const char *)get_loc_data(data, ent)); +} +static const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; + /* Data fetch function type */ typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); @@ -94,32 +142,38 @@ static __kprobes void call_fetch(struct fetch_param *fprm, return fprm->fn(regs, fprm->data, dest); } -#define FETCH_FUNC_NAME(kind, type) fetch_##kind##_##type +#define FETCH_FUNC_NAME(method, type) fetch_##method##_##type /* * Define macro for basic types - we don't need to define s* types, because * we have to care only about bitwidth at recording time. 
*/ -#define DEFINE_BASIC_FETCH_FUNCS(kind) \ -DEFINE_FETCH_##kind(u8) \ -DEFINE_FETCH_##kind(u16) \ -DEFINE_FETCH_##kind(u32) \ -DEFINE_FETCH_##kind(u64) - -#define CHECK_BASIC_FETCH_FUNCS(kind, fn) \ - ((FETCH_FUNC_NAME(kind, u8) == fn) || \ - (FETCH_FUNC_NAME(kind, u16) == fn) || \ - (FETCH_FUNC_NAME(kind, u32) == fn) || \ - (FETCH_FUNC_NAME(kind, u64) == fn)) +#define DEFINE_BASIC_FETCH_FUNCS(method) \ +DEFINE_FETCH_##method(u8) \ +DEFINE_FETCH_##method(u16) \ +DEFINE_FETCH_##method(u32) \ +DEFINE_FETCH_##method(u64) + +#define CHECK_FETCH_FUNCS(method, fn) \ + (((FETCH_FUNC_NAME(method, u8) == fn) || \ + (FETCH_FUNC_NAME(method, u16) == fn) || \ + (FETCH_FUNC_NAME(method, u32) == fn) || \ + (FETCH_FUNC_NAME(method, u64) == fn) || \ + (FETCH_FUNC_NAME(method, string) == fn) || \ + (FETCH_FUNC_NAME(method, string_size) == fn)) \ + && (fn != NULL)) /* Data fetch function templates */ #define DEFINE_FETCH_reg(type) \ static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ - void *offset, void *dest) \ + void *offset, void *dest) \ { \ *(type *)dest = (type)regs_get_register(regs, \ (unsigned int)((unsigned long)offset)); \ } DEFINE_BASIC_FETCH_FUNCS(reg) +/* No string on the register */ +#define fetch_reg_string NULL +#define fetch_reg_string_size NULL #define DEFINE_FETCH_stack(type) \ static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ @@ -129,6 +183,9 @@ static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ (unsigned int)((unsigned long)offset)); \ } DEFINE_BASIC_FETCH_FUNCS(stack) +/* No string on the stack entry */ +#define fetch_stack_string NULL +#define fetch_stack_string_size NULL #define DEFINE_FETCH_retval(type) \ static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ @@ -137,6 +194,9 @@ static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ *(type *)dest = (type)regs_return_value(regs); \ } DEFINE_BASIC_FETCH_FUNCS(retval) +/* No string on the retval */ +#define fetch_retval_string NULL +#define fetch_retval_string_size NULL #define DEFINE_FETCH_memory(type) \ static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ @@ -149,6 +209,62 @@ static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ *(type *)dest = retval; \ } DEFINE_BASIC_FETCH_FUNCS(memory) +/* + * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max + * length and relative data location. + */ +static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, + void *addr, void *dest) +{ + long ret; + int maxlen = get_rloc_len(*(u32 *)dest); + u8 *dst = get_rloc_data(dest); + u8 *src = addr; + mm_segment_t old_fs = get_fs(); + if (!maxlen) + return; + /* + * Try to get string again, since the string can be changed while + * probing. 
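/*
 * A small sketch (not part of the patch) of how the data_rloc word
 * defined earlier packs a string argument's length together with its
 * offset relative to the word itself; the numbers are arbitrary.
 */
static void data_rloc_example(void)
{
	u32 dl = make_data_rloc(20, 24);	/* (20 << 16) | 24 */

	WARN_ON(get_rloc_len(dl) != 20);	/* room / fetched length */
	WARN_ON(get_rloc_offs(dl) != 24);	/* where the bytes live */
}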
+ */ + set_fs(KERNEL_DS); + pagefault_disable(); + do + ret = __copy_from_user_inatomic(dst++, src++, 1); + while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen); + dst[-1] = '\0'; + pagefault_enable(); + set_fs(old_fs); + + if (ret < 0) { /* Failed to fetch string */ + ((u8 *)get_rloc_data(dest))[0] = '\0'; + *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); + } else + *(u32 *)dest = make_data_rloc(src - (u8 *)addr, + get_rloc_offs(*(u32 *)dest)); +} +/* Return the length of string -- including null terminal byte */ +static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, + void *addr, void *dest) +{ + int ret, len = 0; + u8 c; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + pagefault_disable(); + do { + ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); + len++; + } while (c && ret == 0 && len < MAX_STRING_SIZE); + pagefault_enable(); + set_fs(old_fs); + + if (ret < 0) /* Failed to check the length */ + *(u32 *)dest = 0; + else + *(u32 *)dest = len; +} /* Memory fetching by symbol */ struct symbol_cache { @@ -203,6 +319,8 @@ static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\ *(type *)dest = 0; \ } DEFINE_BASIC_FETCH_FUNCS(symbol) +DEFINE_FETCH_symbol(string) +DEFINE_FETCH_symbol(string_size) /* Dereference memory access function */ struct deref_fetch_param { @@ -224,12 +342,14 @@ static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\ *(type *)dest = 0; \ } DEFINE_BASIC_FETCH_FUNCS(deref) +DEFINE_FETCH_deref(string) +DEFINE_FETCH_deref(string_size) static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) { - if (CHECK_BASIC_FETCH_FUNCS(deref, data->orig.fn)) + if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) free_deref_fetch_param(data->orig.data); - else if (CHECK_BASIC_FETCH_FUNCS(symbol, data->orig.fn)) + else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) free_symbol_cache(data->orig.data); kfree(data); } @@ -240,23 +360,43 @@ static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) -#define ASSIGN_FETCH_FUNC(kind, type) \ - .kind = FETCH_FUNC_NAME(kind, type) - -#define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ - {.name = #ptype, \ - .size = sizeof(ftype), \ - .is_signed = sign, \ - .print = PRINT_TYPE_FUNC_NAME(ptype), \ - .fmt = PRINT_TYPE_FMT_NAME(ptype), \ -ASSIGN_FETCH_FUNC(reg, ftype), \ -ASSIGN_FETCH_FUNC(stack, ftype), \ -ASSIGN_FETCH_FUNC(retval, ftype), \ -ASSIGN_FETCH_FUNC(memory, ftype), \ -ASSIGN_FETCH_FUNC(symbol, ftype), \ -ASSIGN_FETCH_FUNC(deref, ftype), \ +/* Fetch types */ +enum { + FETCH_MTD_reg = 0, + FETCH_MTD_stack, + FETCH_MTD_retval, + FETCH_MTD_memory, + FETCH_MTD_symbol, + FETCH_MTD_deref, + FETCH_MTD_END, +}; + +#define ASSIGN_FETCH_FUNC(method, type) \ + [FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type) + +#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ + {.name = _name, \ + .size = _size, \ + .is_signed = sign, \ + .print = PRINT_TYPE_FUNC_NAME(ptype), \ + .fmt = PRINT_TYPE_FMT_NAME(ptype), \ + .fmttype = _fmttype, \ + .fetch = { \ +ASSIGN_FETCH_FUNC(reg, ftype), \ +ASSIGN_FETCH_FUNC(stack, ftype), \ +ASSIGN_FETCH_FUNC(retval, ftype), \ +ASSIGN_FETCH_FUNC(memory, ftype), \ +ASSIGN_FETCH_FUNC(symbol, ftype), \ +ASSIGN_FETCH_FUNC(deref, ftype), \ + } \ } +#define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ + __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, 
#ptype) + +#define FETCH_TYPE_STRING 0 +#define FETCH_TYPE_STRSIZE 1 + /* Fetch type information table */ static const struct fetch_type { const char *name; /* Name of type */ @@ -264,14 +404,16 @@ static const struct fetch_type { int is_signed; /* Signed flag */ print_type_func_t print; /* Print functions */ const char *fmt; /* Fromat string */ + const char *fmttype; /* Name in format file */ /* Fetch functions */ - fetch_func_t reg; - fetch_func_t stack; - fetch_func_t retval; - fetch_func_t memory; - fetch_func_t symbol; - fetch_func_t deref; + fetch_func_t fetch[FETCH_MTD_END]; } fetch_type_table[] = { + /* Special types */ + [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, + sizeof(u32), 1, "__data_loc char[]"), + [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, + string_size, sizeof(u32), 0, "u32"), + /* Basic types */ ASSIGN_FETCH_TYPE(u8, u8, 0), ASSIGN_FETCH_TYPE(u16, u16, 0), ASSIGN_FETCH_TYPE(u32, u32, 0), @@ -302,12 +444,28 @@ static __kprobes void fetch_stack_address(struct pt_regs *regs, *(unsigned long *)dest = kernel_stack_pointer(regs); } +static fetch_func_t get_fetch_size_function(const struct fetch_type *type, + fetch_func_t orig_fn) +{ + int i; + + if (type != &fetch_type_table[FETCH_TYPE_STRING]) + return NULL; /* Only string type needs size function */ + for (i = 0; i < FETCH_MTD_END; i++) + if (type->fetch[i] == orig_fn) + return fetch_type_table[FETCH_TYPE_STRSIZE].fetch[i]; + + WARN_ON(1); /* This should not happen */ + return NULL; +} + /** * Kprobe event core functions */ struct probe_arg { struct fetch_param fetch; + struct fetch_param fetch_size; unsigned int offset; /* Offset from argument entry */ const char *name; /* Name of this argument */ const char *comm; /* Command of this argument */ @@ -429,9 +587,9 @@ error: static void free_probe_arg(struct probe_arg *arg) { - if (CHECK_BASIC_FETCH_FUNCS(deref, arg->fetch.fn)) + if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn)) free_deref_fetch_param(arg->fetch.data); - else if (CHECK_BASIC_FETCH_FUNCS(symbol, arg->fetch.fn)) + else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn)) free_symbol_cache(arg->fetch.data); kfree(arg->name); kfree(arg->comm); @@ -548,7 +706,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, if (strcmp(arg, "retval") == 0) { if (is_return) - f->fn = t->retval; + f->fn = t->fetch[FETCH_MTD_retval]; else ret = -EINVAL; } else if (strncmp(arg, "stack", 5) == 0) { @@ -562,7 +720,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, if (ret || param > PARAM_MAX_STACK) ret = -EINVAL; else { - f->fn = t->stack; + f->fn = t->fetch[FETCH_MTD_stack]; f->data = (void *)param; } } else @@ -588,7 +746,7 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, case '%': /* named register */ ret = regs_query_register_offset(arg + 1); if (ret >= 0) { - f->fn = t->reg; + f->fn = t->fetch[FETCH_MTD_reg]; f->data = (void *)(unsigned long)ret; ret = 0; } @@ -598,7 +756,7 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, ret = strict_strtoul(arg + 1, 0, ¶m); if (ret) break; - f->fn = t->memory; + f->fn = t->fetch[FETCH_MTD_memory]; f->data = (void *)param; } else { ret = split_symbol_offset(arg + 1, &offset); @@ -606,7 +764,7 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, break; f->data = alloc_symbol_cache(arg + 1, offset); if (f->data) - f->fn = t->symbol; + f->fn = t->fetch[FETCH_MTD_symbol]; } break; case '+': /* deref memory */ @@ -636,14 +794,17 @@ static int __parse_probe_arg(char 
*arg, const struct fetch_type *t, if (ret) kfree(dprm); else { - f->fn = t->deref; + f->fn = t->fetch[FETCH_MTD_deref]; f->data = (void *)dprm; } } break; } - if (!ret && !f->fn) + if (!ret && !f->fn) { /* Parsed, but do not find fetch method */ + pr_info("%s type has no corresponding fetch method.\n", + t->name); ret = -EINVAL; + } return ret; } @@ -652,6 +813,7 @@ static int parse_probe_arg(char *arg, struct trace_probe *tp, struct probe_arg *parg, int is_return) { const char *t; + int ret; if (strlen(arg) > MAX_ARGSTR_LEN) { pr_info("Argument is too long.: %s\n", arg); @@ -674,7 +836,13 @@ static int parse_probe_arg(char *arg, struct trace_probe *tp, } parg->offset = tp->size; tp->size += parg->type->size; - return __parse_probe_arg(arg, parg->type, &parg->fetch, is_return); + ret = __parse_probe_arg(arg, parg->type, &parg->fetch, is_return); + if (ret >= 0) { + parg->fetch_size.fn = get_fetch_size_function(parg->type, + parg->fetch.fn); + parg->fetch_size.data = parg->fetch.data; + } + return ret; } /* Return 1 if name is reserved or already used by another argument */ @@ -1043,6 +1211,54 @@ static const struct file_operations kprobe_profile_ops = { .release = seq_release, }; +/* Sum up total data length for dynamic arraies (strings) */ +static __kprobes int __get_data_size(struct trace_probe *tp, + struct pt_regs *regs) +{ + int i, ret = 0; + u32 len; + + for (i = 0; i < tp->nr_args; i++) + if (unlikely(tp->args[i].fetch_size.fn)) { + call_fetch(&tp->args[i].fetch_size, regs, &len); + ret += len; + } + + return ret; +} + +/* Store the value of each argument */ +static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp, + struct pt_regs *regs, + u8 *data, int maxlen) +{ + int i; + u32 end = tp->size; + u32 *dl; /* Data (relative) location */ + + for (i = 0; i < tp->nr_args; i++) { + if (unlikely(tp->args[i].fetch_size.fn)) { + /* + * First, we set the relative location and + * maximum data length to *dl + */ + dl = (u32 *)(data + tp->args[i].offset); + *dl = make_data_rloc(maxlen, end - tp->args[i].offset); + /* Then try to fetch string or dynamic array data */ + call_fetch(&tp->args[i].fetch, regs, dl); + /* Reduce maximum length */ + end += get_rloc_len(*dl); + maxlen -= get_rloc_len(*dl); + /* Trick here, convert data_rloc to data_loc */ + *dl = convert_rloc_to_loc(*dl, + ent_size + tp->args[i].offset); + } else + /* Just fetching data normally */ + call_fetch(&tp->args[i].fetch, regs, + data + tp->args[i].offset); + } +} + /* Kprobe handler */ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) { @@ -1050,8 +1266,7 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) struct kprobe_trace_entry_head *entry; struct ring_buffer_event *event; struct ring_buffer *buffer; - u8 *data; - int size, i, pc; + int size, dsize, pc; unsigned long irq_flags; struct ftrace_event_call *call = &tp->call; @@ -1060,7 +1275,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) local_save_flags(irq_flags); pc = preempt_count(); - size = sizeof(*entry) + tp->size; + dsize = __get_data_size(tp, regs); + size = sizeof(*entry) + tp->size + dsize; event = trace_current_buffer_lock_reserve(&buffer, call->event.type, size, irq_flags, pc); @@ -1069,9 +1285,7 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) entry = ring_buffer_event_data(event); entry->ip = (unsigned long)kp->addr; - data = (u8 *)&entry[1]; - for (i = 0; i < tp->nr_args; i++) - 
call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); + store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); if (!filter_current_check_discard(buffer, call, entry, event)) trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); @@ -1085,15 +1299,15 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, struct kretprobe_trace_entry_head *entry; struct ring_buffer_event *event; struct ring_buffer *buffer; - u8 *data; - int size, i, pc; + int size, pc, dsize; unsigned long irq_flags; struct ftrace_event_call *call = &tp->call; local_save_flags(irq_flags); pc = preempt_count(); - size = sizeof(*entry) + tp->size; + dsize = __get_data_size(tp, regs); + size = sizeof(*entry) + tp->size + dsize; event = trace_current_buffer_lock_reserve(&buffer, call->event.type, size, irq_flags, pc); @@ -1103,9 +1317,7 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, entry = ring_buffer_event_data(event); entry->func = (unsigned long)tp->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; - data = (u8 *)&entry[1]; - for (i = 0; i < tp->nr_args; i++) - call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); + store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); if (!filter_current_check_discard(buffer, call, entry, event)) trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); @@ -1137,7 +1349,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags, data = (u8 *)&field[1]; for (i = 0; i < tp->nr_args; i++) if (!tp->args[i].type->print(s, tp->args[i].name, - data + tp->args[i].offset)) + data + tp->args[i].offset, field)) goto partial; if (!trace_seq_puts(s, "\n")) @@ -1179,7 +1391,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, data = (u8 *)&field[1]; for (i = 0; i < tp->nr_args; i++) if (!tp->args[i].type->print(s, tp->args[i].name, - data + tp->args[i].offset)) + data + tp->args[i].offset, field)) goto partial; if (!trace_seq_puts(s, "\n")) @@ -1234,7 +1446,7 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); /* Set argument names as fields */ for (i = 0; i < tp->nr_args; i++) { - ret = trace_define_field(event_call, tp->args[i].type->name, + ret = trace_define_field(event_call, tp->args[i].type->fmttype, tp->args[i].name, sizeof(field) + tp->args[i].offset, tp->args[i].type->size, @@ -1256,7 +1468,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); /* Set argument names as fields */ for (i = 0; i < tp->nr_args; i++) { - ret = trace_define_field(event_call, tp->args[i].type->name, + ret = trace_define_field(event_call, tp->args[i].type->fmttype, tp->args[i].name, sizeof(field) + tp->args[i].offset, tp->args[i].type->size, @@ -1296,8 +1508,13 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len) pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); for (i = 0; i < tp->nr_args; i++) { - pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", - tp->args[i].name); + if (strcmp(tp->args[i].type->name, "string") == 0) + pos += snprintf(buf + pos, LEN_OR_ZERO, + ", __get_str(%s)", + tp->args[i].name); + else + pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", + tp->args[i].name); } #undef LEN_OR_ZERO @@ -1334,11 +1551,11 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, struct ftrace_event_call *call = &tp->call; struct kprobe_trace_entry_head *entry; struct hlist_head 
*head; - u8 *data; - int size, __size, i; + int size, __size, dsize; int rctx; - __size = sizeof(*entry) + tp->size; + dsize = __get_data_size(tp, regs); + __size = sizeof(*entry) + tp->size + dsize; size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, @@ -1350,9 +1567,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, return; entry->ip = (unsigned long)kp->addr; - data = (u8 *)&entry[1]; - for (i = 0; i < tp->nr_args; i++) - call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); + memset(&entry[1], 0, dsize); + store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); head = this_cpu_ptr(call->perf_events); perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); @@ -1366,11 +1582,11 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, struct ftrace_event_call *call = &tp->call; struct kretprobe_trace_entry_head *entry; struct hlist_head *head; - u8 *data; - int size, __size, i; + int size, __size, dsize; int rctx; - __size = sizeof(*entry) + tp->size; + dsize = __get_data_size(tp, regs); + __size = sizeof(*entry) + tp->size + dsize; size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, @@ -1383,9 +1599,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, entry->func = (unsigned long)tp->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; - data = (u8 *)&entry[1]; - for (i = 0; i < tp->nr_args; i++) - call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); + store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); head = this_cpu_ptr(call->perf_events); perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); -- cgit v1.1 From eb703f98191a505f78d0066712ad67d5dedc4c90 Mon Sep 17 00:00:00 2001 From: Kulikov Vasiliy Date: Mon, 5 Jul 2010 12:00:54 +0400 Subject: kernel/watchdog: Initialize 'result' Variable on the stack is not initialized to zero, do it explicitly. This bug was found by a compiler warning: kernel/watchdog.c:463: warning: 'result' may be used uninitialized in this function Signed-off-by: Kulikov Vasiliy Acked-by: Don Zickus Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Mike Galbraith Cc: Steven Rostedt LKML-Reference: <1278316854-28442-1-git-send-email-segooon@gmail.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 91b0b26..613bc1f 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -460,7 +460,7 @@ static void watchdog_disable(int cpu) static void watchdog_enable_all_cpus(void) { int cpu; - int result; + int result = 0; for_each_online_cpu(cpu) result += watchdog_enable(cpu); -- cgit v1.1 From 5e3d20a68f63fc5a310687d81956c3b96e488b84 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sun, 4 Jul 2010 00:02:26 +0200 Subject: init: Remove the BKL from startup code I have shown by code review that no driver takes the BKL at init time any more, so whatever the init code was locking against is no longer there and it is now safe to remove the BKL there. 
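Going back to the kprobe_perf_func()/kretprobe_perf_func() hunks above: the reserved perf record is now the fixed entry header plus the static argument area (tp->size) plus the dynamic string bytes (dsize), padded so that, together with the u32 size word perf adds in front, the raw sample comes out u64-aligned. A minimal userspace model of that arithmetic, with made-up sizes and an ALIGN macro written to mirror the kernel's:

#include <stdio.h>
#include <stdint.h>

/* round x up to a multiple of a (a must be a power of two), like the kernel's ALIGN() */
#define ALIGN(x, a)    (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
    size_t entry   = 8;   /* stand-in for sizeof(struct kprobe_trace_entry_head) */
    size_t tp_size = 12;  /* static argument area, tp->size */
    size_t dsize   = 17;  /* dynamic string bytes, what __get_data_size() would return */

    size_t __size = entry + tp_size + dsize;
    /* perf prepends a u32 length; keep the record u64-aligned after it */
    size_t size = ALIGN(__size + sizeof(uint32_t), sizeof(uint64_t)) - sizeof(uint32_t);

    printf("__size=%zu -> reserved size=%zu\n", __size, size);  /* 37 -> 44 */
    return 0;
}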
Signed-off-by: Arnd Bergmann Acked-by: Steven Rostedt Signed-off-by: Frederic Weisbecker --- kernel/trace/trace.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 086d363..8047ca5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -734,13 +734,6 @@ __acquires(kernel_lock) return -1; } - /* - * When this gets called we hold the BKL which means that - * preemption is disabled. Various trace selftests however - * need to disable and enable preemption for successful tests. - * So we drop the BKL here and grab it after the tests again. - */ - unlock_kernel(); mutex_lock(&trace_types_lock); tracing_selftest_running = true; @@ -822,7 +815,6 @@ __acquires(kernel_lock) #endif out_unlock: - lock_kernel(); return ret; } -- cgit v1.1 From 698f93159a735bd29a8767c9f60d9b2d75870f8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 2 Jul 2010 20:41:51 +0200 Subject: fix comment/printk typos concerning "already" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Uwe Kleine-König Signed-off-by: Jiri Kosina --- kernel/time/tick-broadcast.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index b3bafd5..48b2761 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -188,7 +188,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) /* * Setup the next period for devices, which do not have * periodic mode. We read dev->next_event first and add to it - * when the event alrady expired. clockevents_program_event() + * when the event already expired. clockevents_program_event() * sets dev->next_event only when the event is really * programmed to the device. */ -- cgit v1.1 From 083b804c4d3e1e3d0eace56bdbc0f674946d2847 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:52 +0200 Subject: async: use workqueue for worker pool Replace private worker pool with system_unbound_wq. Signed-off-by: Tejun Heo Acked-by: Arjan van de Ven --- kernel/async.c | 141 +++++++++------------------------------------------------ 1 file changed, 22 insertions(+), 119 deletions(-) (limited to 'kernel') diff --git a/kernel/async.c b/kernel/async.c index 15319d6..cd9dbb9 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -49,40 +49,33 @@ asynchronous and synchronous parts of the kernel. 
*/ #include -#include #include #include #include -#include -#include -#include #include +#include #include static async_cookie_t next_cookie = 1; -#define MAX_THREADS 256 #define MAX_WORK 32768 static LIST_HEAD(async_pending); static LIST_HEAD(async_running); static DEFINE_SPINLOCK(async_lock); -static int async_enabled = 0; - struct async_entry { - struct list_head list; - async_cookie_t cookie; - async_func_ptr *func; - void *data; - struct list_head *running; + struct list_head list; + struct work_struct work; + async_cookie_t cookie; + async_func_ptr *func; + void *data; + struct list_head *running; }; static DECLARE_WAIT_QUEUE_HEAD(async_done); -static DECLARE_WAIT_QUEUE_HEAD(async_new); static atomic_t entry_count; -static atomic_t thread_count; extern int initcall_debug; @@ -117,27 +110,23 @@ static async_cookie_t lowest_in_progress(struct list_head *running) spin_unlock_irqrestore(&async_lock, flags); return ret; } + /* * pick the first pending entry and run it */ -static void run_one_entry(void) +static void async_run_entry_fn(struct work_struct *work) { + struct async_entry *entry = + container_of(work, struct async_entry, work); unsigned long flags; - struct async_entry *entry; ktime_t calltime, delta, rettime; - /* 1) pick one task from the pending queue */ - + /* 1) move self to the running queue */ spin_lock_irqsave(&async_lock, flags); - if (list_empty(&async_pending)) - goto out; - entry = list_first_entry(&async_pending, struct async_entry, list); - - /* 2) move it to the running queue */ list_move_tail(&entry->list, entry->running); spin_unlock_irqrestore(&async_lock, flags); - /* 3) run it (and print duration)*/ + /* 2) run (and print duration) */ if (initcall_debug && system_state == SYSTEM_BOOTING) { printk("calling %lli_%pF @ %i\n", (long long)entry->cookie, entry->func, task_pid_nr(current)); @@ -153,31 +142,25 @@ static void run_one_entry(void) (long long)ktime_to_ns(delta) >> 10); } - /* 4) remove it from the running queue */ + /* 3) remove self from the running queue */ spin_lock_irqsave(&async_lock, flags); list_del(&entry->list); - /* 5) free the entry */ + /* 4) free the entry */ kfree(entry); atomic_dec(&entry_count); spin_unlock_irqrestore(&async_lock, flags); - /* 6) wake up any waiters. */ + /* 5) wake up any waiters */ wake_up(&async_done); - return; - -out: - spin_unlock_irqrestore(&async_lock, flags); } - static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running) { struct async_entry *entry; unsigned long flags; async_cookie_t newcookie; - /* allow irq-off callers */ entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC); @@ -186,7 +169,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l * If we're out of memory or if there's too much work * pending already, we execute synchronously. 
*/ - if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) { + if (!entry || atomic_read(&entry_count) > MAX_WORK) { kfree(entry); spin_lock_irqsave(&async_lock, flags); newcookie = next_cookie++; @@ -196,6 +179,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l ptr(data, newcookie); return newcookie; } + INIT_WORK(&entry->work, async_run_entry_fn); entry->func = ptr; entry->data = data; entry->running = running; @@ -205,7 +189,10 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l list_add_tail(&entry->list, &async_pending); atomic_inc(&entry_count); spin_unlock_irqrestore(&async_lock, flags); - wake_up(&async_new); + + /* schedule for execution */ + queue_work(system_unbound_wq, &entry->work); + return newcookie; } @@ -312,87 +299,3 @@ void async_synchronize_cookie(async_cookie_t cookie) async_synchronize_cookie_domain(cookie, &async_running); } EXPORT_SYMBOL_GPL(async_synchronize_cookie); - - -static int async_thread(void *unused) -{ - DECLARE_WAITQUEUE(wq, current); - add_wait_queue(&async_new, &wq); - - while (!kthread_should_stop()) { - int ret = HZ; - set_current_state(TASK_INTERRUPTIBLE); - /* - * check the list head without lock.. false positives - * are dealt with inside run_one_entry() while holding - * the lock. - */ - rmb(); - if (!list_empty(&async_pending)) - run_one_entry(); - else - ret = schedule_timeout(HZ); - - if (ret == 0) { - /* - * we timed out, this means we as thread are redundant. - * we sign off and die, but we to avoid any races there - * is a last-straw check to see if work snuck in. - */ - atomic_dec(&thread_count); - wmb(); /* manager must see our departure first */ - if (list_empty(&async_pending)) - break; - /* - * woops work came in between us timing out and us - * signing off; we need to stay alive and keep working. - */ - atomic_inc(&thread_count); - } - } - remove_wait_queue(&async_new, &wq); - - return 0; -} - -static int async_manager_thread(void *unused) -{ - DECLARE_WAITQUEUE(wq, current); - add_wait_queue(&async_new, &wq); - - while (!kthread_should_stop()) { - int tc, ec; - - set_current_state(TASK_INTERRUPTIBLE); - - tc = atomic_read(&thread_count); - rmb(); - ec = atomic_read(&entry_count); - - while (tc < ec && tc < MAX_THREADS) { - if (IS_ERR(kthread_run(async_thread, NULL, "async/%i", - tc))) { - msleep(100); - continue; - } - atomic_inc(&thread_count); - tc++; - } - - schedule(); - } - remove_wait_queue(&async_new, &wq); - - return 0; -} - -static int __init async_init(void) -{ - async_enabled = - !IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr")); - - WARN_ON(!async_enabled); - return 0; -} - -core_initcall(async_init); -- cgit v1.1 From 9f9c23644b07e7a51f6f39064e61c150be712932 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Jul 2010 11:31:20 +0200 Subject: workqueue: fix locking in retry path of maybe_create_worker() maybe_create_worker() mismanaged locking when worker creation fails and it has to retry. Fix locking and simplify lock manipulation. 
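What the fix amounts to is that gcwq->lock is released once before the retry loop and re-taken once after it, rather than being dropped and re-acquired inside every iteration. A rough userspace model of that corrected shape, with a pthread mutex standing in for gcwq->lock and toy try_create()/need_worker() helpers invented for the illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int attempts;

static bool try_create(void)              /* stand-in for create_worker() */
{
    return ++attempts >= 3;               /* pretend it succeeds on the third try */
}

static bool need_worker(void)             /* stand-in for need_to_create_worker() */
{
    return attempts < 3;
}

static void maybe_create_worker(void)     /* entered and left with pool_lock held */
{
restart:
    pthread_mutex_unlock(&pool_lock);     /* drop the lock once, before the retry loop */

    while (true) {
        if (try_create())
            break;                        /* creation succeeded, leave the loop */
        if (!need_worker())
            break;                        /* nobody needs a worker any more */
        usleep(1000);                     /* CREATE_COOLDOWN stand-in */
    }

    pthread_mutex_lock(&pool_lock);       /* re-take the lock once, after the loop */
    if (need_worker())
        goto restart;
}

int main(void)
{
    pthread_mutex_lock(&pool_lock);       /* the kernel enters with gcwq->lock held */
    maybe_create_worker();
    pthread_mutex_unlock(&pool_lock);
    printf("worker created after %d attempts\n", attempts);
    return 0;
}

The point being modeled is only the invariant: the function returns holding the lock again and never sleeps while holding it.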
Signed-off-by: Tejun Heo Reported-by: Yong Zhang --- kernel/workqueue.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 20d6237..aca9472 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1442,14 +1442,14 @@ static bool maybe_create_worker(struct global_cwq *gcwq) if (!need_to_create_worker(gcwq)) return false; restart: + spin_unlock_irq(&gcwq->lock); + /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); while (true) { struct worker *worker; - spin_unlock_irq(&gcwq->lock); - worker = create_worker(gcwq, true); if (worker) { del_timer_sync(&gcwq->mayday_timer); @@ -1462,15 +1462,13 @@ restart: if (!need_to_create_worker(gcwq)) break; - spin_unlock_irq(&gcwq->lock); __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(CREATE_COOLDOWN); - spin_lock_irq(&gcwq->lock); + if (!need_to_create_worker(gcwq)) break; } - spin_unlock_irq(&gcwq->lock); del_timer_sync(&gcwq->mayday_timer); spin_lock_irq(&gcwq->lock); if (need_to_create_worker(gcwq)) -- cgit v1.1 From 4c879170296174bde05cd1c643dac16594edee77 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:30:10 +0200 Subject: padata: Check for valid padata instance on start This patch introduces the PADATA_INVALID flag which is checked on padata start. This will be used to mark a padata instance as invalid, if the padata cpumask does not intersect with the active cpumask. we change padata_start to return an error if the PADATA_INVALID is set. Also we adapt the only padata user, pcrypt to this change. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index ff8de1b..e7d723a 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -485,6 +485,11 @@ static void padata_flush_queues(struct parallel_data *pd) BUG_ON(atomic_read(&pd->refcnt) != 0); } +static void __padata_start(struct padata_instance *pinst) +{ + pinst->flags |= PADATA_INIT; +} + /* Replace the internal control stucture with a new one. */ static void padata_replace(struct padata_instance *pinst, struct parallel_data *pd_new) @@ -619,11 +624,20 @@ EXPORT_SYMBOL(padata_remove_cpu); * * @pinst: padata instance to start */ -void padata_start(struct padata_instance *pinst) +int padata_start(struct padata_instance *pinst) { + int err = 0; + mutex_lock(&pinst->lock); - pinst->flags |= PADATA_INIT; + + if (pinst->flags & PADATA_INVALID) + err =-EINVAL; + + __padata_start(pinst); + mutex_unlock(&pinst->lock); + + return err; } EXPORT_SYMBOL(padata_start); -- cgit v1.1 From ee836555120140f770005b8ce6673c913d1b9a98 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:30:47 +0200 Subject: padata: Block until the instance is unused on stop This patch makes padata_stop to block until the padata instance is unused. Also we split padata_stop to a locked and a unlocked version. This is in preparation to be able to change the cpumask after a call to patata stop. 
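Tying this back to the previous padata patch above (checking for a valid instance on start): the intent of the PADATA_INVALID flag is that starting an instance whose cpumask cannot do any work fails with -EINVAL instead of silently succeeding. A compact userspace model of that intent, with simplified flag values and a stand-in struct rather than the real padata_instance:

#include <errno.h>
#include <stdio.h>

#define PADATA_INIT     (1 << 0)   /* illustrative values, not the kernel definitions */
#define PADATA_INVALID  (1 << 2)

struct instance { unsigned int flags; };

static int instance_start(struct instance *inst)
{
    if (inst->flags & PADATA_INVALID)
        return -EINVAL;            /* cpumask did not intersect the active mask */
    inst->flags |= PADATA_INIT;
    return 0;
}

int main(void)
{
    struct instance ok = { 0 }, bad = { PADATA_INVALID };

    printf("ok:  %d\n", instance_start(&ok));   /* 0 */
    printf("bad: %d\n", instance_start(&bad));  /* -EINVAL (-22 on Linux) */
    return 0;
}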
Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index e7d723a..9e18dfa 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -490,6 +490,20 @@ static void __padata_start(struct padata_instance *pinst) pinst->flags |= PADATA_INIT; } +static void __padata_stop(struct padata_instance *pinst) +{ + if (!(pinst->flags & PADATA_INIT)) + return; + + pinst->flags &= ~PADATA_INIT; + + synchronize_rcu(); + + get_online_cpus(); + padata_flush_queues(pinst->pd); + put_online_cpus(); +} + /* Replace the internal control stucture with a new one. */ static void padata_replace(struct padata_instance *pinst, struct parallel_data *pd_new) @@ -649,7 +663,7 @@ EXPORT_SYMBOL(padata_start); void padata_stop(struct padata_instance *pinst) { mutex_lock(&pinst->lock); - pinst->flags &= ~PADATA_INIT; + __padata_stop(pinst); mutex_unlock(&pinst->lock); } EXPORT_SYMBOL(padata_stop); @@ -770,17 +784,11 @@ EXPORT_SYMBOL(padata_alloc); */ void padata_free(struct padata_instance *pinst) { - padata_stop(pinst); - - synchronize_rcu(); - #ifdef CONFIG_HOTPLUG_CPU unregister_hotcpu_notifier(&pinst->cpu_notifier); #endif - get_online_cpus(); - padata_flush_queues(pinst->pd); - put_online_cpus(); + padata_stop(pinst); padata_free_pd(pinst->pd); free_cpumask_var(pinst->cpumask); kfree(pinst); -- cgit v1.1 From 33e54450683c5e970ac007489d7921ba792d093c Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:31:26 +0200 Subject: padata: Handle empty padata cpumasks This patch fixes a bug when the padata cpumask does not intersect with the active cpumask. In this case we get a division by zero in padata_alloc_pd and we end up with a useless padata instance. Padata can end up with an empty cpumask for two reasons: 1. A user removed the last cpu that belongs to the padata cpumask and the active cpumask. 2. The last cpu that belongs to the padata cpumask and the active cpumask goes offline. We introduce a function padata_validate_cpumask to check if the padata cpumask does intersect with the active cpumask. If the cpumasks do not intersect we mark the instance as invalid, so it can't be used. We do not allocate the cpumask dependend recources in this case. This fixes the division by zero and keeps the padate instance in a consistent state. It's not possible to trigger this bug by now because the only padata user, pcrypt uses always the possible cpumask. Reported-by: Dan Kruchinin Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 9e18dfa..57ec4eb 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -516,12 +516,27 @@ static void padata_replace(struct padata_instance *pinst, synchronize_rcu(); - padata_flush_queues(pd_old); - padata_free_pd(pd_old); + if (pd_old) { + padata_flush_queues(pd_old); + padata_free_pd(pd_old); + } pinst->flags &= ~PADATA_RESET; } +/* If cpumask contains no active cpu, we mark the instance as invalid. 
*/ +static bool padata_validate_cpumask(struct padata_instance *pinst, + const struct cpumask *cpumask) +{ + if (!cpumask_intersects(cpumask, cpu_active_mask)) { + pinst->flags |= PADATA_INVALID; + return false; + } + + pinst->flags &= ~PADATA_INVALID; + return true; +} + /** * padata_set_cpumask - set the cpumask that padata should use * @@ -531,11 +546,18 @@ static void padata_replace(struct padata_instance *pinst, int padata_set_cpumask(struct padata_instance *pinst, cpumask_var_t cpumask) { - struct parallel_data *pd; + int valid; int err = 0; + struct parallel_data *pd = NULL; mutex_lock(&pinst->lock); + valid = padata_validate_cpumask(pinst, cpumask); + if (!valid) { + __padata_stop(pinst); + goto out_replace; + } + get_online_cpus(); pd = padata_alloc_pd(pinst, cpumask); @@ -544,10 +566,14 @@ int padata_set_cpumask(struct padata_instance *pinst, goto out; } +out_replace: cpumask_copy(pinst->cpumask, cpumask); padata_replace(pinst, pd); + if (valid) + __padata_start(pinst); + out: put_online_cpus(); @@ -567,6 +593,9 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu) return -ENOMEM; padata_replace(pinst, pd); + + if (padata_validate_cpumask(pinst, pinst->cpumask)) + __padata_start(pinst); } return 0; @@ -597,9 +626,16 @@ EXPORT_SYMBOL(padata_add_cpu); static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) { - struct parallel_data *pd; + struct parallel_data *pd = NULL; if (cpumask_test_cpu(cpu, cpu_online_mask)) { + + if (!padata_validate_cpumask(pinst, pinst->cpumask)) { + __padata_stop(pinst); + padata_replace(pinst, pd); + goto out; + } + pd = padata_alloc_pd(pinst, pinst->cpumask); if (!pd) return -ENOMEM; @@ -607,6 +643,7 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) padata_replace(pinst, pd); } +out: return 0; } @@ -732,7 +769,7 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, struct workqueue_struct *wq) { struct padata_instance *pinst; - struct parallel_data *pd; + struct parallel_data *pd = NULL; pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); if (!pinst) @@ -740,12 +777,14 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, get_online_cpus(); - pd = padata_alloc_pd(pinst, cpumask); - if (!pd) + if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL)) goto err_free_inst; - if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL)) - goto err_free_pd; + if (padata_validate_cpumask(pinst, cpumask)) { + pd = padata_alloc_pd(pinst, cpumask); + if (!pd) + goto err_free_mask; + } rcu_assign_pointer(pinst->pd, pd); @@ -767,8 +806,8 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, return pinst; -err_free_pd: - padata_free_pd(pd); +err_free_mask: + free_cpumask_var(pinst->cpumask); err_free_inst: kfree(pinst); put_online_cpus(); -- cgit v1.1 From 83f619f3c8abb82cac9158cf23c656ec5c184607 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:32:02 +0200 Subject: padata: make padata_do_parallel to return zero on success To return -EINPROGRESS on success in padata_do_parallel was considered to be odd. This patch changes this to return zero on success. Also the only user of padata, pcrypt is adapted to convert a return of zero to -EINPROGRESS within the crypto layer. This also removes the pcrypt fallback if padata_do_parallel was called on a not running padata instance as we can't handle it anymore. This fallback was unused, so it's save to remove it. 
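With padata_do_parallel() returning zero for a successfully queued job, a pcrypt-style caller has to do the translation to the asynchronous-crypto convention itself. A hedged caller-side sketch, where do_parallel() is a stand-in for the real call rather than the pcrypt code:

#include <errno.h>
#include <stdio.h>

/* stand-in for padata_do_parallel(): 0 on success, a negative errno on failure */
static int do_parallel(int fail)
{
    return fail ? -EBUSY : 0;
}

static int submit(int fail)
{
    int err = do_parallel(fail);

    if (!err)
        err = -EINPROGRESS;   /* crypto API convention: queued, result comes later */
    return err;
}

int main(void)
{
    printf("queued:  %d\n", submit(0));  /* -EINPROGRESS (-115 on Linux) */
    printf("refused: %d\n", submit(1));  /* -EBUSY */
    return 0;
}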
Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 57ec4eb..ae8defc 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -111,10 +111,13 @@ int padata_do_parallel(struct padata_instance *pinst, pd = rcu_dereference(pinst->pd); - err = 0; + err = -EINVAL; if (!(pinst->flags & PADATA_INIT)) goto out; + if (!cpumask_test_cpu(cb_cpu, pd->cpumask)) + goto out; + err = -EBUSY; if ((pinst->flags & PADATA_RESET)) goto out; @@ -122,11 +125,7 @@ int padata_do_parallel(struct padata_instance *pinst, if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) goto out; - err = -EINVAL; - if (!cpumask_test_cpu(cb_cpu, pd->cpumask)) - goto out; - - err = -EINPROGRESS; + err = 0; atomic_inc(&pd->refcnt); padata->pd = pd; padata->cb_cpu = cb_cpu; -- cgit v1.1 From 5f1a8c1bc724498ff32acbd59ed5263275676b9d Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:32:39 +0200 Subject: padata: simplify serialization mechanism We count the number of processed objects on a percpu basis, so we need to go through all the percpu reorder queues to calculate the sequence number of the next object that needs serialization. This patch changes this to count the number of processed objects global. So we can calculate the sequence number and the percpu reorder queue of the next object that needs serialization without searching through the percpu reorder queues. This avoids some accesses to memory of foreign cpus. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 71 +++++++++++++++------------------------------------------ 1 file changed, 19 insertions(+), 52 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index ae8defc..450d67d 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -170,79 +170,47 @@ EXPORT_SYMBOL(padata_do_parallel); */ static struct padata_priv *padata_get_next(struct parallel_data *pd) { - int cpu, num_cpus, empty, calc_seq_nr; - int seq_nr, next_nr, overrun, next_overrun; + int cpu, num_cpus; + int next_nr, next_index; struct padata_queue *queue, *next_queue; struct padata_priv *padata; struct padata_list *reorder; - empty = 0; - next_nr = -1; - next_overrun = 0; - next_queue = NULL; - num_cpus = cpumask_weight(pd->cpumask); - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - reorder = &queue->reorder; - - /* - * Calculate the seq_nr of the object that should be - * next in this reorder queue. - */ - overrun = 0; - calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus) - + queue->cpu_index; - - if (unlikely(calc_seq_nr > pd->max_seq_nr)) { - calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1; - overrun = 1; - } - - if (!list_empty(&reorder->list)) { - padata = list_entry(reorder->list.next, - struct padata_priv, list); - - seq_nr = padata->seq_nr; - BUG_ON(calc_seq_nr != seq_nr); - } else { - seq_nr = calc_seq_nr; - empty++; - } - - if (next_nr < 0 || seq_nr < next_nr - || (next_overrun && !overrun)) { - next_nr = seq_nr; - next_overrun = overrun; - next_queue = queue; - } + /* + * Calculate the percpu reorder queue and the sequence + * number of the next object. 
+ */ + next_nr = pd->processed; + next_index = next_nr % num_cpus; + cpu = padata_index_to_cpu(pd, next_index); + next_queue = per_cpu_ptr(pd->queue, cpu); + + if (unlikely(next_nr > pd->max_seq_nr)) { + next_nr = next_nr - pd->max_seq_nr - 1; + next_index = next_nr % num_cpus; + cpu = padata_index_to_cpu(pd, next_index); + next_queue = per_cpu_ptr(pd->queue, cpu); + pd->processed = 0; } padata = NULL; - if (empty == num_cpus) - goto out; - reorder = &next_queue->reorder; if (!list_empty(&reorder->list)) { padata = list_entry(reorder->list.next, struct padata_priv, list); - if (unlikely(next_overrun)) { - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - atomic_set(&queue->num_obj, 0); - } - } + BUG_ON(next_nr != padata->seq_nr); spin_lock(&reorder->lock); list_del_init(&padata->list); atomic_dec(&pd->reorder_objects); spin_unlock(&reorder->lock); - atomic_inc(&next_queue->num_obj); + pd->processed++; goto out; } @@ -430,7 +398,6 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, INIT_WORK(&queue->pwork, padata_parallel_worker); INIT_WORK(&queue->swork, padata_serial_worker); - atomic_set(&queue->num_obj, 0); } num_cpus = cpumask_weight(pd->cpumask); -- cgit v1.1 From 5d550467b9770042e9699690907babc32104a8d4 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 15 Jul 2010 23:27:35 +0200 Subject: tracing: Remove ksym tracer The ksym (breakpoint) ftrace plugin has been superseded by perf tools that are much more poweful to use the cpu breakpoints. This tracer doesn't bring more feature. It has been deprecated for a while now, lets remove it. Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Prasad Cc: Ingo Molnar --- kernel/trace/Kconfig | 22 -- kernel/trace/Makefile | 1 - kernel/trace/trace.h | 6 - kernel/trace/trace_entries.h | 15 -- kernel/trace/trace_ksym.c | 508 ------------------------------------------ kernel/trace/trace_selftest.c | 54 ----- 6 files changed, 606 deletions(-) delete mode 100644 kernel/trace/trace_ksym.c (limited to 'kernel') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index f669092..f5306cb 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -308,28 +308,6 @@ config BRANCH_TRACER Say N if unsure. -config KSYM_TRACER - bool "Trace read and write access on kernel memory locations" - depends on HAVE_HW_BREAKPOINT - select TRACING - help - This tracer helps find read and write operations on any given kernel - symbol i.e. /proc/kallsyms. - -config PROFILE_KSYM_TRACER - bool "Profile all kernel memory accesses on 'watched' variables" - depends on KSYM_TRACER - help - This tracer profiles kernel accesses on variables watched through the - ksym tracer ftrace plugin. Depending upon the hardware, all read - and write operations on kernel variables can be monitored for - accesses. - - The results will be displayed in: - /debugfs/tracing/profile_ksym - - Say N if unsure. 
- config STACK_TRACER bool "Trace max stack" depends on HAVE_FUNCTION_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 469a1c7..84b2c99 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -53,7 +53,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o endif obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o -obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o obj-$(CONFIG_EVENT_TRACING) += power-traces.o libftrace-y := ftrace.o diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index cc90ccd..84d3f12 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -30,7 +30,6 @@ enum trace_type { TRACE_GRAPH_ENT, TRACE_USER_STACK, TRACE_BLK, - TRACE_KSYM, __TRACE_LAST_TYPE, }; @@ -200,7 +199,6 @@ extern void __ftrace_bad_type(void); TRACE_GRAPH_ENT); \ IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ TRACE_GRAPH_RET); \ - IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\ __ftrace_bad_type(); \ } while (0) @@ -361,8 +359,6 @@ int register_tracer(struct tracer *type); void unregister_tracer(struct tracer *type); int is_tracing_stopped(void); -extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); - extern unsigned long nsecs_to_usecs(unsigned long nsecs); extern unsigned long tracing_thresh; @@ -436,8 +432,6 @@ extern int trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr); extern int trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr); -extern int trace_selftest_startup_ksym(struct tracer *trace, - struct trace_array *tr); #endif /* CONFIG_FTRACE_STARTUP_TEST */ extern void *head_page(struct trace_array_cpu *data); diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index 13abc15..8412837 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -291,18 +291,3 @@ FTRACE_ENTRY(branch, trace_branch, __entry->func, __entry->file, __entry->correct) ); -FTRACE_ENTRY(ksym_trace, ksym_trace_entry, - - TRACE_KSYM, - - F_STRUCT( - __field( unsigned long, ip ) - __field( unsigned char, type ) - __array( char , cmd, TASK_COMM_LEN ) - __field( unsigned long, addr ) - ), - - F_printk("ip: %pF type: %d ksym_name: %pS cmd: %s", - (void *)__entry->ip, (unsigned int)__entry->type, - (void *)__entry->addr, __entry->cmd) -); diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c deleted file mode 100644 index 8eaf007..0000000 --- a/kernel/trace/trace_ksym.c +++ /dev/null @@ -1,508 +0,0 @@ -/* - * trace_ksym.c - Kernel Symbol Tracer - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- * - * Copyright (C) IBM Corporation, 2009 - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "trace_output.h" -#include "trace.h" - -#include -#include - -#include - -#define KSYM_TRACER_OP_LEN 3 /* rw- */ - -struct trace_ksym { - struct perf_event **ksym_hbp; - struct perf_event_attr attr; -#ifdef CONFIG_PROFILE_KSYM_TRACER - atomic64_t counter; -#endif - struct hlist_node ksym_hlist; -}; - -static struct trace_array *ksym_trace_array; - -static unsigned int ksym_tracing_enabled; - -static HLIST_HEAD(ksym_filter_head); - -static DEFINE_MUTEX(ksym_tracer_mutex); - -#ifdef CONFIG_PROFILE_KSYM_TRACER - -#define MAX_UL_INT 0xffffffff - -void ksym_collect_stats(unsigned long hbp_hit_addr) -{ - struct hlist_node *node; - struct trace_ksym *entry; - - rcu_read_lock(); - hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { - if (entry->attr.bp_addr == hbp_hit_addr) { - atomic64_inc(&entry->counter); - break; - } - } - rcu_read_unlock(); -} -#endif /* CONFIG_PROFILE_KSYM_TRACER */ - -void ksym_hbp_handler(struct perf_event *hbp, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct ring_buffer_event *event; - struct ksym_trace_entry *entry; - struct ring_buffer *buffer; - int pc; - - if (!ksym_tracing_enabled) - return; - - buffer = ksym_trace_array->buffer; - - pc = preempt_count(); - - event = trace_buffer_lock_reserve(buffer, TRACE_KSYM, - sizeof(*entry), 0, pc); - if (!event) - return; - - entry = ring_buffer_event_data(event); - entry->ip = instruction_pointer(regs); - entry->type = hw_breakpoint_type(hbp); - entry->addr = hw_breakpoint_addr(hbp); - strlcpy(entry->cmd, current->comm, TASK_COMM_LEN); - -#ifdef CONFIG_PROFILE_KSYM_TRACER - ksym_collect_stats(hw_breakpoint_addr(hbp)); -#endif /* CONFIG_PROFILE_KSYM_TRACER */ - - trace_buffer_unlock_commit(buffer, event, 0, pc); -} - -/* Valid access types are represented as - * - * rw- : Set Read/Write Access Breakpoint - * -w- : Set Write Access Breakpoint - * --- : Clear Breakpoints - * --x : Set Execution Break points (Not available yet) - * - */ -static int ksym_trace_get_access_type(char *str) -{ - int access = 0; - - if (str[0] == 'r') - access |= HW_BREAKPOINT_R; - - if (str[1] == 'w') - access |= HW_BREAKPOINT_W; - - if (str[2] == 'x') - access |= HW_BREAKPOINT_X; - - switch (access) { - case HW_BREAKPOINT_R: - case HW_BREAKPOINT_W: - case HW_BREAKPOINT_W | HW_BREAKPOINT_R: - return access; - default: - return -EINVAL; - } -} - -/* - * There can be several possible malformed requests and we attempt to capture - * all of them. We enumerate some of the rules - * 1. We will not allow kernel symbols with ':' since it is used as a delimiter. - * i.e. multiple ':' symbols disallowed. Possible uses are of the form - * ::. - * 2. No delimiter symbol ':' in the input string - * 3. Spurious operator symbols or symbols not in their respective positions - * 4. :--- i.e. clear breakpoint request when ksym_name not in file - * 5. Kernel symbol not a part of /proc/kallsyms - * 6. 
Duplicate requests - */ -static int parse_ksym_trace_str(char *input_string, char **ksymname, - unsigned long *addr) -{ - int ret; - - *ksymname = strsep(&input_string, ":"); - *addr = kallsyms_lookup_name(*ksymname); - - /* Check for malformed request: (2), (1) and (5) */ - if ((!input_string) || - (strlen(input_string) != KSYM_TRACER_OP_LEN) || - (*addr == 0)) - return -EINVAL;; - - ret = ksym_trace_get_access_type(input_string); - - return ret; -} - -int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) -{ - struct trace_ksym *entry; - int ret = -ENOMEM; - - entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - hw_breakpoint_init(&entry->attr); - - entry->attr.bp_type = op; - entry->attr.bp_addr = addr; - entry->attr.bp_len = HW_BREAKPOINT_LEN_4; - - entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr, - ksym_hbp_handler); - - if (IS_ERR(entry->ksym_hbp)) { - ret = PTR_ERR(entry->ksym_hbp); - if (ret == -ENOSPC) { - printk(KERN_ERR "ksym_tracer: Maximum limit reached." - " No new requests for tracing can be accepted now.\n"); - } else { - printk(KERN_INFO "ksym_tracer request failed. Try again" - " later!!\n"); - } - goto err; - } - - hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head); - - return 0; - -err: - kfree(entry); - - return ret; -} - -static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, - size_t count, loff_t *ppos) -{ - struct trace_ksym *entry; - struct hlist_node *node; - struct trace_seq *s; - ssize_t cnt = 0; - int ret; - - s = kmalloc(sizeof(*s), GFP_KERNEL); - if (!s) - return -ENOMEM; - trace_seq_init(s); - - mutex_lock(&ksym_tracer_mutex); - - hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { - ret = trace_seq_printf(s, "%pS:", - (void *)(unsigned long)entry->attr.bp_addr); - if (entry->attr.bp_type == HW_BREAKPOINT_R) - ret = trace_seq_puts(s, "r--\n"); - else if (entry->attr.bp_type == HW_BREAKPOINT_W) - ret = trace_seq_puts(s, "-w-\n"); - else if (entry->attr.bp_type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R)) - ret = trace_seq_puts(s, "rw-\n"); - WARN_ON_ONCE(!ret); - } - - cnt = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); - - mutex_unlock(&ksym_tracer_mutex); - - kfree(s); - - return cnt; -} - -static void __ksym_trace_reset(void) -{ - struct trace_ksym *entry; - struct hlist_node *node, *node1; - - mutex_lock(&ksym_tracer_mutex); - hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head, - ksym_hlist) { - unregister_wide_hw_breakpoint(entry->ksym_hbp); - hlist_del_rcu(&(entry->ksym_hlist)); - synchronize_rcu(); - kfree(entry); - } - mutex_unlock(&ksym_tracer_mutex); -} - -static ssize_t ksym_trace_filter_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct trace_ksym *entry; - struct hlist_node *node; - char *buf, *input_string, *ksymname = NULL; - unsigned long ksym_addr = 0; - int ret, op, changed = 0; - - buf = kzalloc(count + 1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - ret = -EFAULT; - if (copy_from_user(buf, buffer, count)) - goto out; - - buf[count] = '\0'; - input_string = strstrip(buf); - - /* - * Clear all breakpoints if: - * 1: echo > ksym_trace_filter - * 2: echo 0 > ksym_trace_filter - * 3: echo "*:---" > ksym_trace_filter - */ - if (!input_string[0] || !strcmp(input_string, "0") || - !strcmp(input_string, "*:---")) { - __ksym_trace_reset(); - ret = 0; - goto out; - } - - ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); - if (ret < 0) - goto out; - - 
mutex_lock(&ksym_tracer_mutex); - - ret = -EINVAL; - hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { - if (entry->attr.bp_addr == ksym_addr) { - /* Check for malformed request: (6) */ - if (entry->attr.bp_type != op) - changed = 1; - else - goto out_unlock; - break; - } - } - if (changed) { - unregister_wide_hw_breakpoint(entry->ksym_hbp); - entry->attr.bp_type = op; - ret = 0; - if (op > 0) { - entry->ksym_hbp = - register_wide_hw_breakpoint(&entry->attr, - ksym_hbp_handler); - if (IS_ERR(entry->ksym_hbp)) - ret = PTR_ERR(entry->ksym_hbp); - else - goto out_unlock; - } - /* Error or "symbol:---" case: drop it */ - hlist_del_rcu(&(entry->ksym_hlist)); - synchronize_rcu(); - kfree(entry); - goto out_unlock; - } else { - /* Check for malformed request: (4) */ - if (op) - ret = process_new_ksym_entry(ksymname, op, ksym_addr); - } -out_unlock: - mutex_unlock(&ksym_tracer_mutex); -out: - kfree(buf); - return !ret ? count : ret; -} - -static const struct file_operations ksym_tracing_fops = { - .open = tracing_open_generic, - .read = ksym_trace_filter_read, - .write = ksym_trace_filter_write, -}; - -static void ksym_trace_reset(struct trace_array *tr) -{ - ksym_tracing_enabled = 0; - __ksym_trace_reset(); -} - -static int ksym_trace_init(struct trace_array *tr) -{ - int cpu, ret = 0; - - for_each_online_cpu(cpu) - tracing_reset(tr, cpu); - ksym_tracing_enabled = 1; - ksym_trace_array = tr; - - return ret; -} - -static void ksym_trace_print_header(struct seq_file *m) -{ - seq_puts(m, - "# TASK-PID CPU# Symbol " - "Type Function\n"); - seq_puts(m, - "# | | | " - " | |\n"); -} - -static enum print_line_t ksym_trace_output(struct trace_iterator *iter) -{ - struct trace_entry *entry = iter->ent; - struct trace_seq *s = &iter->seq; - struct ksym_trace_entry *field; - char str[KSYM_SYMBOL_LEN]; - int ret; - - if (entry->type != TRACE_KSYM) - return TRACE_TYPE_UNHANDLED; - - trace_assign_type(field, entry); - - ret = trace_seq_printf(s, "%11s-%-5d [%03d] %pS", field->cmd, - entry->pid, iter->cpu, (char *)field->addr); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - switch (field->type) { - case HW_BREAKPOINT_R: - ret = trace_seq_printf(s, " R "); - break; - case HW_BREAKPOINT_W: - ret = trace_seq_printf(s, " W "); - break; - case HW_BREAKPOINT_R | HW_BREAKPOINT_W: - ret = trace_seq_printf(s, " RW "); - break; - default: - return TRACE_TYPE_PARTIAL_LINE; - } - - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - sprint_symbol(str, field->ip); - ret = trace_seq_printf(s, "%s\n", str); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - return TRACE_TYPE_HANDLED; -} - -struct tracer ksym_tracer __read_mostly = -{ - .name = "ksym_tracer", - .init = ksym_trace_init, - .reset = ksym_trace_reset, -#ifdef CONFIG_FTRACE_SELFTEST - .selftest = trace_selftest_startup_ksym, -#endif - .print_header = ksym_trace_print_header, - .print_line = ksym_trace_output -}; - -#ifdef CONFIG_PROFILE_KSYM_TRACER -static int ksym_profile_show(struct seq_file *m, void *v) -{ - struct hlist_node *node; - struct trace_ksym *entry; - int access_type = 0; - char fn_name[KSYM_NAME_LEN]; - - seq_puts(m, " Access Type "); - seq_puts(m, " Symbol Counter\n"); - seq_puts(m, " ----------- "); - seq_puts(m, " ------ -------\n"); - - rcu_read_lock(); - hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { - - access_type = entry->attr.bp_type; - - switch (access_type) { - case HW_BREAKPOINT_R: - seq_puts(m, " R "); - break; - case HW_BREAKPOINT_W: - seq_puts(m, " W "); - break; - case HW_BREAKPOINT_R | 
HW_BREAKPOINT_W: - seq_puts(m, " RW "); - break; - default: - seq_puts(m, " NA "); - } - - if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0) - seq_printf(m, " %-36s", fn_name); - else - seq_printf(m, " %-36s", ""); - seq_printf(m, " %15llu\n", - (unsigned long long)atomic64_read(&entry->counter)); - } - rcu_read_unlock(); - - return 0; -} - -static int ksym_profile_open(struct inode *node, struct file *file) -{ - return single_open(file, ksym_profile_show, NULL); -} - -static const struct file_operations ksym_profile_fops = { - .open = ksym_profile_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; -#endif /* CONFIG_PROFILE_KSYM_TRACER */ - -__init static int init_ksym_trace(void) -{ - struct dentry *d_tracer; - - d_tracer = tracing_init_dentry(); - - trace_create_file("ksym_trace_filter", 0644, d_tracer, - NULL, &ksym_tracing_fops); - -#ifdef CONFIG_PROFILE_KSYM_TRACER - trace_create_file("ksym_profile", 0444, d_tracer, - NULL, &ksym_profile_fops); -#endif - - return register_tracer(&ksym_tracer); -} -device_initcall(init_ksym_trace); diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 250e7f9..39a5ca4 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -17,7 +17,6 @@ static inline int trace_valid_entry(struct trace_entry *entry) case TRACE_BRANCH: case TRACE_GRAPH_ENT: case TRACE_GRAPH_RET: - case TRACE_KSYM: return 1; } return 0; @@ -755,56 +754,3 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) } #endif /* CONFIG_BRANCH_TRACER */ -#ifdef CONFIG_KSYM_TRACER -static int ksym_selftest_dummy; - -int -trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr) -{ - unsigned long count; - int ret; - - /* start the tracing */ - ret = tracer_init(trace, tr); - if (ret) { - warn_failed_init_tracer(trace, ret); - return ret; - } - - ksym_selftest_dummy = 0; - /* Register the read-write tracing request */ - - ret = process_new_ksym_entry("ksym_selftest_dummy", - HW_BREAKPOINT_R | HW_BREAKPOINT_W, - (unsigned long)(&ksym_selftest_dummy)); - - if (ret < 0) { - printk(KERN_CONT "ksym_trace read-write startup test failed\n"); - goto ret_path; - } - /* Perform a read and a write operation over the dummy variable to - * trigger the tracer - */ - if (ksym_selftest_dummy == 0) - ksym_selftest_dummy++; - - /* stop the tracing. */ - tracing_stop(); - /* check the trace buffer */ - ret = trace_test_buffer(tr, &count); - trace->reset(tr); - tracing_start(); - - /* read & write operations - one each is performed on the dummy variable - * triggering two entries in the trace buffer - */ - if (!ret && count != 2) { - printk(KERN_CONT "Ksym tracer startup test failed"); - ret = -1; - } - -ret_path: - return ret; -} -#endif /* CONFIG_KSYM_TRACER */ - -- cgit v1.1 From 8fd00b4d7014b00448eb33cf0590815304769798 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Wed, 26 Aug 2009 18:41:16 +0200 Subject: rlimits: security, add task_struct to setrlimit Add task_struct to task_setrlimit of security_operations to be able to set rlimit of task other than current. 
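The hook change is mechanical: the security operation now receives the task whose limits are being changed, and the existing caller keeps passing current. A toy model of widening a callback signature this way (the ops structure and hook are illustrative, not the actual LSM definitions):

#include <stdio.h>

struct task { const char *comm; };

/* new-style hook: takes the target task explicitly instead of implying current */
struct sec_ops {
    int (*task_setrlimit)(struct task *tsk, unsigned int resource);
};

static int default_task_setrlimit(struct task *tsk, unsigned int resource)
{
    printf("checking rlimit %u change for %s\n", resource, tsk->comm);
    return 0;   /* allow */
}

static struct sec_ops ops = { .task_setrlimit = default_task_setrlimit };

int main(void)
{
    struct task current_task = { "bash" };
    /* existing call sites simply pass the current task, as sys_setrlimit() does */
    return ops.task_setrlimit(&current_task, 0 /* RLIMIT_CPU */);
}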
Signed-off-by: Jiri Slaby Acked-by: Eric Paris Acked-by: James Morris --- kernel/sys.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index e83ddbb..1ba4522 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1290,7 +1290,7 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open) return -EPERM; - retval = security_task_setrlimit(resource, &new_rlim); + retval = security_task_setrlimit(current, resource, &new_rlim); if (retval) return retval; -- cgit v1.1 From 5ab46b345e418747b3a52f0892680c0745c4223c Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 28 Aug 2009 14:05:12 +0200 Subject: rlimits: add task_struct to update_rlimit_cpu Add task_struct as a parameter to update_rlimit_cpu to be able to set rlimit_cpu of different task than current. Signed-off-by: Jiri Slaby Acked-by: James Morris --- kernel/posix-cpu-timers.c | 8 ++++---- kernel/sys.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 9829646..0513900 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -16,13 +16,13 @@ * siglock protection since other code may update expiration cache as * well. */ -void update_rlimit_cpu(unsigned long rlim_new) +void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new) { cputime_t cputime = secs_to_cputime(rlim_new); - spin_lock_irq(¤t->sighand->siglock); - set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); - spin_unlock_irq(¤t->sighand->siglock); + spin_lock_irq(&task->sighand->siglock); + set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL); + spin_unlock_irq(&task->sighand->siglock); } static int check_clock(const clockid_t which_clock) diff --git a/kernel/sys.c b/kernel/sys.c index 1ba4522..f5183b0 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1320,7 +1320,7 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) if (new_rlim.rlim_cur == RLIM_INFINITY) goto out; - update_rlimit_cpu(new_rlim.rlim_cur); + update_rlimit_cpu(current, new_rlim.rlim_cur); out: return 0; } -- cgit v1.1 From 2fb9d2689a0041b88b25bc3187eada2968e25995 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 3 Sep 2009 19:21:45 +0200 Subject: rlimits: make sure ->rlim_max never grows in sys_setrlimit Mostly preparation for Jiri's changes, but probably makes sense anyway. sys_setrlimit() checks new_rlim.rlim_max <= old_rlim->rlim_max, but when it takes task_lock() old_rlim->rlim_max can be already lowered. Move this check under task_lock(). Currently this is not important, we can only race with our sub-thread, this means the application is stupid. But when we change the code to allow the update of !current task's limits, it becomes important to make sure ->rlim_max can be lowered "reliably" even if we race with the application doing sys_setrlimit(). 
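The essence of the change is that the "may rlim_max grow?" test and the assignment must happen under the same lock, so the comparison is made against whatever value is current at the moment of the write. A small userspace model of that corrected pattern, with a pthread mutex standing in for task_lock() and a plain boolean for capable(CAP_SYS_RESOURCE):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct rlim { unsigned long rlim_cur, rlim_max; };

static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rlim old_rlim = { 10, 20 };

static int set_rlim(struct rlim new_rlim, bool capable)
{
    int retval = 0;

    pthread_mutex_lock(&task_lock);
    /* check and update under the same lock: a concurrent lowering of
     * rlim_max cannot slip in between the comparison and the write */
    if (new_rlim.rlim_max > old_rlim.rlim_max && !capable)
        retval = -EPERM;
    else
        old_rlim = new_rlim;
    pthread_mutex_unlock(&task_lock);

    return retval;
}

int main(void)
{
    struct rlim grow = { 10, 30 };

    printf("unprivileged raise of rlim_max: %d\n", set_rlim(grow, false)); /* -EPERM */
    printf("privileged raise of rlim_max:   %d\n", set_rlim(grow, true));  /* 0 */
    return 0;
}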
Signed-off-by: Oleg Nesterov Signed-off-by: Jiri Slaby --- kernel/sys.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index f5183b0..f2b2d7a 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1283,10 +1283,6 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) return -EFAULT; if (new_rlim.rlim_cur > new_rlim.rlim_max) return -EINVAL; - old_rlim = current->signal->rlim + resource; - if ((new_rlim.rlim_max > old_rlim->rlim_max) && - !capable(CAP_SYS_RESOURCE)) - return -EPERM; if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open) return -EPERM; @@ -1304,11 +1300,16 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) new_rlim.rlim_cur = 1; } + old_rlim = current->signal->rlim + resource; task_lock(current->group_leader); - *old_rlim = new_rlim; + if (new_rlim.rlim_max > old_rlim->rlim_max && + !capable(CAP_SYS_RESOURCE)) + retval = -EPERM; + else + *old_rlim = new_rlim; task_unlock(current->group_leader); - if (resource != RLIMIT_CPU) + if (retval || resource != RLIMIT_CPU) goto out; /* @@ -1322,7 +1323,7 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) update_rlimit_cpu(current, new_rlim.rlim_cur); out: - return 0; + return retval; } /* -- cgit v1.1 From 7855c35da7ba16b389d17710401c4a55a3ea2102 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Wed, 26 Aug 2009 23:45:34 +0200 Subject: rlimits: split sys_setrlimit Create do_setrlimit from sys_setrlimit and declare do_setrlimit in the resource header. This is the first phase to have generic do_prlimit which allows to be called from read, write and compat rlimits code. The new do_setrlimit also accepts a task pointer to change the limits of. Currently, it cannot be other than current, but this will change with locking later. Also pass tsk->group_leader to security_task_setrlimit to check whether current is allowed to change rlimits of the process and not its arbitrary thread because it makes more sense given that rlimit are per process and not per-thread. Signed-off-by: Jiri Slaby --- kernel/sys.c | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index f2b2d7a..b5b96e3 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1272,42 +1272,41 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, #endif -SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) +int do_setrlimit(struct task_struct *tsk, unsigned int resource, + struct rlimit *new_rlim) { - struct rlimit new_rlim, *old_rlim; + struct rlimit *old_rlim; int retval; if (resource >= RLIM_NLIMITS) return -EINVAL; - if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) - return -EFAULT; - if (new_rlim.rlim_cur > new_rlim.rlim_max) + if (new_rlim->rlim_cur > new_rlim->rlim_max) return -EINVAL; - if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open) + if (resource == RLIMIT_NOFILE && new_rlim->rlim_max > sysctl_nr_open) return -EPERM; - retval = security_task_setrlimit(current, resource, &new_rlim); + retval = security_task_setrlimit(tsk->group_leader, resource, new_rlim); if (retval) return retval; - if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) { + if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) { /* * The caller is asking for an immediate RLIMIT_CPU * expiry. But we use the zero value to mean "it was * never set". 
So let's cheat and make it one second * instead */ - new_rlim.rlim_cur = 1; + new_rlim->rlim_cur = 1; } - old_rlim = current->signal->rlim + resource; - task_lock(current->group_leader); - if (new_rlim.rlim_max > old_rlim->rlim_max && + old_rlim = tsk->signal->rlim + resource; + task_lock(tsk->group_leader); + if (new_rlim->rlim_max > old_rlim->rlim_max && !capable(CAP_SYS_RESOURCE)) retval = -EPERM; else - *old_rlim = new_rlim; - task_unlock(current->group_leader); + *old_rlim = *new_rlim; + task_unlock(tsk->group_leader); if (retval || resource != RLIMIT_CPU) goto out; @@ -1318,14 +1317,23 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) * very long-standing error, and fixing it now risks breakage of * applications, so we live with it */ - if (new_rlim.rlim_cur == RLIM_INFINITY) + if (new_rlim->rlim_cur == RLIM_INFINITY) goto out; - update_rlimit_cpu(current, new_rlim.rlim_cur); + update_rlimit_cpu(tsk, new_rlim->rlim_cur); out: return retval; } +SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) +{ + struct rlimit new_rlim; + + if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) + return -EFAULT; + return do_setrlimit(current, resource, &new_rlim); +} + /* * It would make sense to put struct rusage in the task_struct, * except that would make the task_struct be *really big*. After -- cgit v1.1 From 1c1e618ddd15f69fd87ccea596769f78c8065504 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 28 Aug 2009 14:08:17 +0200 Subject: rlimits: allow setrlimit to non-current tasks Add locking to allow setrlimit accept task parameter other than current. Namely, lock tasklist_lock for read and check whether the task structure has sighand non-null. Do all the signal processing under that lock still held. There are some points: 1) security_task_setrlimit is now called with that lock held. This is not new, many security_* functions are called with this lock held already so it doesn't harm (all this security_* stuff does almost the same). 2) task->sighand->siglock (in update_rlimit_cpu) is nested in tasklist_lock. This dependence is already existing. 3) tsk->alloc_lock is nested in tasklist_lock. This is OK too, already existing dependence. 
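Putting the three points together, the resulting nesting in do_setrlimit() looks roughly like this (a simplified sketch of the diff below, not a verbatim copy):

	read_lock(&tasklist_lock);      /* keeps tsk->signal and tsk->sighand alive */
	if (!tsk->sighand) {
		retval = -ESRCH;        /* target task is already exiting */
		goto out;
	}
	retval = security_task_setrlimit(tsk->group_leader, resource, new_rlim);
	if (retval)
		goto out;
	/* RLIMIT_CPU zero-value fixup, as before */
	task_lock(tsk->group_leader);   /* tsk->alloc_lock, nested in tasklist_lock */
	/* rlim_max/CAP_SYS_RESOURCE check and store of the new limits */
	task_unlock(tsk->group_leader);
	update_rlimit_cpu(tsk, new_rlim->rlim_cur); /* takes tsk->sighand->siglock,
						       also nested in tasklist_lock */
out:
	read_unlock(&tasklist_lock);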
Signed-off-by: Jiri Slaby Cc: Oleg Nesterov --- kernel/sys.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index b5b96e3..9dbcbbc 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1272,6 +1272,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, #endif +/* make sure you are allowed to change @tsk limits before calling this */ int do_setrlimit(struct task_struct *tsk, unsigned int resource, struct rlimit *new_rlim) { @@ -1285,9 +1286,16 @@ int do_setrlimit(struct task_struct *tsk, unsigned int resource, if (resource == RLIMIT_NOFILE && new_rlim->rlim_max > sysctl_nr_open) return -EPERM; + /* protect tsk->signal and tsk->sighand from disappearing */ + read_lock(&tasklist_lock); + if (!tsk->sighand) { + retval = -ESRCH; + goto out; + } + retval = security_task_setrlimit(tsk->group_leader, resource, new_rlim); if (retval) - return retval; + goto out; if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) { /* @@ -1322,6 +1330,7 @@ int do_setrlimit(struct task_struct *tsk, unsigned int resource, update_rlimit_cpu(tsk, new_rlim->rlim_cur); out: + read_unlock(&tasklist_lock); return retval; } -- cgit v1.1 From 86f162f4c75ceb6daf43165469eeeca1bc3d4639 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Sat, 14 Nov 2009 17:37:04 +0100 Subject: rlimits: do security check under task_lock Do security_task_setrlimit under task_lock. Other tasks may change limits under our hands while we are checking limits inside the function. From now on, they can't. Note that all the security work is done under a spinlock here now. Security hooks count with that, they are called from interrupt context (like security_task_kill) and with spinlocks already held (e.g. capable->security_capable). Signed-off-by: Jiri Slaby Acked-by: James Morris Cc: Heiko Carstens --- kernel/sys.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index 9dbcbbc..c762eeb 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1277,7 +1277,7 @@ int do_setrlimit(struct task_struct *tsk, unsigned int resource, struct rlimit *new_rlim) { struct rlimit *old_rlim; - int retval; + int retval = 0; if (resource >= RLIM_NLIMITS) return -EINVAL; @@ -1293,9 +1293,14 @@ int do_setrlimit(struct task_struct *tsk, unsigned int resource, goto out; } - retval = security_task_setrlimit(tsk->group_leader, resource, new_rlim); - if (retval) - goto out; + old_rlim = tsk->signal->rlim + resource; + task_lock(tsk->group_leader); + if (new_rlim->rlim_max > old_rlim->rlim_max && + !capable(CAP_SYS_RESOURCE)) + retval = -EPERM; + if (!retval) + retval = security_task_setrlimit(tsk->group_leader, resource, + new_rlim); if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) { /* @@ -1307,12 +1312,7 @@ int do_setrlimit(struct task_struct *tsk, unsigned int resource, new_rlim->rlim_cur = 1; } - old_rlim = tsk->signal->rlim + resource; - task_lock(tsk->group_leader); - if (new_rlim->rlim_max > old_rlim->rlim_max && - !capable(CAP_SYS_RESOURCE)) - retval = -EPERM; - else + if (!retval) *old_rlim = *new_rlim; task_unlock(tsk->group_leader); -- cgit v1.1 From 5b41535aac0c07135ff6a4c5c2ae115d1c20c0bc Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Wed, 24 Mar 2010 16:11:29 +0100 Subject: rlimits: redo do_setrlimit to more generic do_prlimit It now allows also reading of limits. I.e. all read and writes will later use this function. It takes two parameters, new and old limits which can be both NULL. 
If new is non-NULL, the value in it is set to rlimits. If old is non-NULL, current rlimits are stored there. If both are non-NULL, old are stored prior to setting the new ones, atomically. (Similar to sigaction.) Signed-off-by: Jiri Slaby --- kernel/sys.c | 71 +++++++++++++++++++++++++++++++----------------------------- 1 file changed, 37 insertions(+), 34 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index c762eeb..bc7d1be 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1273,18 +1273,21 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, #endif /* make sure you are allowed to change @tsk limits before calling this */ -int do_setrlimit(struct task_struct *tsk, unsigned int resource, - struct rlimit *new_rlim) +int do_prlimit(struct task_struct *tsk, unsigned int resource, + struct rlimit *new_rlim, struct rlimit *old_rlim) { - struct rlimit *old_rlim; + struct rlimit *rlim; int retval = 0; if (resource >= RLIM_NLIMITS) return -EINVAL; - if (new_rlim->rlim_cur > new_rlim->rlim_max) - return -EINVAL; - if (resource == RLIMIT_NOFILE && new_rlim->rlim_max > sysctl_nr_open) - return -EPERM; + if (new_rlim) { + if (new_rlim->rlim_cur > new_rlim->rlim_max) + return -EINVAL; + if (resource == RLIMIT_NOFILE && + new_rlim->rlim_max > sysctl_nr_open) + return -EPERM; + } /* protect tsk->signal and tsk->sighand from disappearing */ read_lock(&tasklist_lock); @@ -1293,42 +1296,42 @@ int do_setrlimit(struct task_struct *tsk, unsigned int resource, goto out; } - old_rlim = tsk->signal->rlim + resource; + rlim = tsk->signal->rlim + resource; task_lock(tsk->group_leader); - if (new_rlim->rlim_max > old_rlim->rlim_max && - !capable(CAP_SYS_RESOURCE)) - retval = -EPERM; - if (!retval) - retval = security_task_setrlimit(tsk->group_leader, resource, - new_rlim); - - if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) { - /* - * The caller is asking for an immediate RLIMIT_CPU - * expiry. But we use the zero value to mean "it was - * never set". So let's cheat and make it one second - * instead - */ - new_rlim->rlim_cur = 1; + if (new_rlim) { + if (new_rlim->rlim_max > rlim->rlim_max && + !capable(CAP_SYS_RESOURCE)) + retval = -EPERM; + if (!retval) + retval = security_task_setrlimit(tsk->group_leader, + resource, new_rlim); + if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) { + /* + * The caller is asking for an immediate RLIMIT_CPU + * expiry. But we use the zero value to mean "it was + * never set". So let's cheat and make it one second + * instead + */ + new_rlim->rlim_cur = 1; + } + } + if (!retval) { + if (old_rlim) + *old_rlim = *rlim; + if (new_rlim) + *rlim = *new_rlim; } - - if (!retval) - *old_rlim = *new_rlim; task_unlock(tsk->group_leader); - if (retval || resource != RLIMIT_CPU) - goto out; - /* * RLIMIT_CPU handling. Note that the kernel fails to return an error * code if it rejected the user's attempt to set RLIMIT_CPU. 
This is a * very long-standing error, and fixing it now risks breakage of * applications, so we live with it */ - if (new_rlim->rlim_cur == RLIM_INFINITY) - goto out; - - update_rlimit_cpu(tsk, new_rlim->rlim_cur); + if (!retval && new_rlim && resource == RLIMIT_CPU && + new_rlim->rlim_cur != RLIM_INFINITY) + update_rlimit_cpu(tsk, new_rlim->rlim_cur); out: read_unlock(&tasklist_lock); return retval; @@ -1340,7 +1343,7 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) return -EFAULT; - return do_setrlimit(current, resource, &new_rlim); + return do_prlimit(current, resource, &new_rlim, NULL); } /* -- cgit v1.1 From b95183453af2ed14a5c7027e58049c9fd17e92ce Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 4 May 2010 11:28:25 +0200 Subject: rlimits: switch more rlimit syscalls to do_prlimit After we added more generic do_prlimit, switch sys_getrlimit to that. Also switch compat handling, so we can get rid of ugly __user casts and avoid setting process' address limit to kernel data and back. Signed-off-by: Jiri Slaby --- kernel/compat.c | 17 +++-------------- kernel/sys.c | 17 ++++++++--------- 2 files changed, 11 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/compat.c b/kernel/compat.c index 5adab05..e167efc 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -279,11 +279,6 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource, struct compat_rlimit __user *rlim) { struct rlimit r; - int ret; - mm_segment_t old_fs = get_fs (); - - if (resource >= RLIM_NLIMITS) - return -EINVAL; if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) || __get_user(r.rlim_cur, &rlim->rlim_cur) || @@ -294,10 +289,7 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource, r.rlim_cur = RLIM_INFINITY; if (r.rlim_max == COMPAT_RLIM_INFINITY) r.rlim_max = RLIM_INFINITY; - set_fs(KERNEL_DS); - ret = sys_setrlimit(resource, (struct rlimit __user *) &r); - set_fs(old_fs); - return ret; + return do_prlimit(current, resource, &r, NULL); } #ifdef COMPAT_RLIM_OLD_INFINITY @@ -329,16 +321,13 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource, #endif -asmlinkage long compat_sys_getrlimit (unsigned int resource, +asmlinkage long compat_sys_getrlimit(unsigned int resource, struct compat_rlimit __user *rlim) { struct rlimit r; int ret; - mm_segment_t old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_getrlimit(resource, (struct rlimit __user *) &r); - set_fs(old_fs); + ret = do_prlimit(current, resource, NULL, &r); if (!ret) { if (r.rlim_cur > COMPAT_RLIM_INFINITY) r.rlim_cur = COMPAT_RLIM_INFINITY; diff --git a/kernel/sys.c b/kernel/sys.c index bc7d1be..9da98dd 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1236,15 +1236,14 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len) SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim) { - if (resource >= RLIM_NLIMITS) - return -EINVAL; - else { - struct rlimit value; - task_lock(current->group_leader); - value = current->signal->rlim[resource]; - task_unlock(current->group_leader); - return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0; - } + struct rlimit value; + int ret; + + ret = do_prlimit(current, resource, NULL, &value); + if (!ret) + ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? 
-EFAULT : 0; + + return ret; } #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT -- cgit v1.1 From c022a0acad534fd5f5d5f17280f6d4d135e74e81 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 4 May 2010 18:03:50 +0200 Subject: rlimits: implement prlimit64 syscall This patch adds the code to support the sys_prlimit64 syscall which modifies-and-returns the rlim values of a selected process atomically. The first parameter, pid, being 0 means current process. Unlike the current implementation, it is a generic interface, architecture indepentent so that we needn't handle compat stuff anymore. In the future, after glibc start to use this we can deprecate sys_setrlimit and sys_getrlimit in favor to clean up the code finally. It also adds a possibility of changing limits of other processes. We check the user's permissions to do that and if it succeeds, the new limits are propagated online. This is good for large scale applications such as SAP or databases where administrators need to change limits time by time (e.g. on crashes increase core size). And it is unacceptable to restart the service. For safety, all rlim users now either use accessors or doesn't need them due to - locking - the fact a process was just forked and nobody else knows about it yet (and nobody can't thus read/write limits) hence it is safe to modify limits now. The limitation is that we currently stay at ulong internal representation. So the rlim64_is_infinity check is used where value is compared against ULONG_MAX on 32-bit which is the maximum value there. And since internally the limits are held in struct rlimit, converters which are used before and after do_prlimit call in sys_prlimit64 are introduced. Signed-off-by: Jiri Slaby --- kernel/sys.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index 9da98dd..e9ad444 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1271,6 +1271,39 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, #endif +static inline bool rlim64_is_infinity(__u64 rlim64) +{ +#if BITS_PER_LONG < 64 + return rlim64 >= ULONG_MAX; +#else + return rlim64 == RLIM64_INFINITY; +#endif +} + +static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64) +{ + if (rlim->rlim_cur == RLIM_INFINITY) + rlim64->rlim_cur = RLIM64_INFINITY; + else + rlim64->rlim_cur = rlim->rlim_cur; + if (rlim->rlim_max == RLIM_INFINITY) + rlim64->rlim_max = RLIM64_INFINITY; + else + rlim64->rlim_max = rlim->rlim_max; +} + +static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim) +{ + if (rlim64_is_infinity(rlim64->rlim_cur)) + rlim->rlim_cur = RLIM_INFINITY; + else + rlim->rlim_cur = (unsigned long)rlim64->rlim_cur; + if (rlim64_is_infinity(rlim64->rlim_max)) + rlim->rlim_max = RLIM_INFINITY; + else + rlim->rlim_max = (unsigned long)rlim64->rlim_max; +} + /* make sure you are allowed to change @tsk limits before calling this */ int do_prlimit(struct task_struct *tsk, unsigned int resource, struct rlimit *new_rlim, struct rlimit *old_rlim) @@ -1336,6 +1369,67 @@ out: return retval; } +/* rcu lock must be held */ +static int check_prlimit_permission(struct task_struct *task) +{ + const struct cred *cred = current_cred(), *tcred; + + tcred = __task_cred(task); + if ((cred->uid != tcred->euid || + cred->uid != tcred->suid || + cred->uid != tcred->uid || + cred->gid != tcred->egid || + cred->gid != tcred->sgid || + cred->gid != tcred->gid) && + !capable(CAP_SYS_RESOURCE)) { + return -EPERM; + } + + 
return 0; +} + +SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource, + const struct rlimit64 __user *, new_rlim, + struct rlimit64 __user *, old_rlim) +{ + struct rlimit64 old64, new64; + struct rlimit old, new; + struct task_struct *tsk; + int ret; + + if (new_rlim) { + if (copy_from_user(&new64, new_rlim, sizeof(new64))) + return -EFAULT; + rlim64_to_rlim(&new64, &new); + } + + rcu_read_lock(); + tsk = pid ? find_task_by_vpid(pid) : current; + if (!tsk) { + rcu_read_unlock(); + return -ESRCH; + } + ret = check_prlimit_permission(tsk); + if (ret) { + rcu_read_unlock(); + return ret; + } + get_task_struct(tsk); + rcu_read_unlock(); + + ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL, + old_rlim ? &old : NULL); + + if (!ret && old_rlim) { + rlim_to_rlim64(&old, &old64); + if (copy_to_user(old_rlim, &old64, sizeof(old64))) + ret = -EFAULT; + } + + put_task_struct(tsk); + return ret; +} + SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) { struct rlimit new_rlim; -- cgit v1.1 From 5343bdb8fd076f16edc9d113a9e35e2a1d1f4966 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Fri, 9 Jul 2010 15:19:54 +0200 Subject: sched: Update rq->clock for nohz balanced cpus Suresh spotted that we don't update the rq->clock in the nohz load-balancer path. Signed-off-by: Peter Zijlstra LKML-Reference: <1278626014.2834.74.camel@sbs-t61.sc.intel.com> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index b4da534..e44a591 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -3596,6 +3596,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) } raw_spin_lock_irq(&this_rq->lock); + update_rq_clock(this_rq); update_cpu_load(this_rq); raw_spin_unlock_irq(&this_rq->lock); -- cgit v1.1 From bbc8cb5baead9607309583b20873ab0cc8d89eaf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 9 Jul 2010 15:15:43 +0200 Subject: sched: Reduce update_group_power() calls Currently we update cpu_power() too often, update_group_power() only updates the local group's cpu_power but it gets called for all groups. Furthermore, CPU_NEWLY_IDLE invocations will result in all cpus calling it, even though a slow update of cpu_power is sufficient. Therefore move the update under 'idle != CPU_NEWLY_IDLE && local_group' to reduce superfluous invocations. Reported-by: Venkatesh Pallipadi Signed-off-by: Peter Zijlstra Acked-by: Suresh Siddha LKML-Reference: <1278612989.1900.176.camel@laptop> Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index e44a591..c9ac097 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2425,14 +2425,14 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, * domains. In the newly idle case, we will allow all the cpu's * to do the newly idle load balance. 
*/ - if (idle != CPU_NEWLY_IDLE && local_group && - balance_cpu != this_cpu) { - *balance = 0; - return; + if (idle != CPU_NEWLY_IDLE && local_group) { + if (balance_cpu != this_cpu) { + *balance = 0; + return; + } + update_group_power(sd, this_cpu); } - update_group_power(sd, this_cpu); - /* Adjust by relative CPU power of the group */ sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power; -- cgit v1.1 From 396e894d289d69bacf5acd983c97cd6e21a14c08 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 9 Jul 2010 15:12:27 +0200 Subject: sched: Revert nohz_ratelimit() for now Norbert reported that nohz_ratelimit() causes his laptop to burn about 4W (40%) extra. For now back out the change and see if we can adjust the power management code to make better decisions. Reported-by: Norbert Preining Signed-off-by: Peter Zijlstra Acked-by: Mike Galbraith Cc: Arjan van de Ven LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/sched.c | 10 ---------- kernel/time/tick-sched.c | 2 +- 2 files changed, 1 insertion(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index f52a880..63b4a14 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1232,16 +1232,6 @@ void wake_up_idle_cpu(int cpu) smp_send_reschedule(cpu); } -int nohz_ratelimit(int cpu) -{ - struct rq *rq = cpu_rq(cpu); - u64 diff = rq->clock - rq->nohz_stamp; - - rq->nohz_stamp = rq->clock; - - return diff < (NSEC_PER_SEC / HZ) >> 1; -} - #endif /* CONFIG_NO_HZ */ static u64 sched_avg_period(void) diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 813993b..f898af6 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -325,7 +325,7 @@ void tick_nohz_stop_sched_tick(int inidle) } while (read_seqretry(&xtime_lock, seq)); if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || - arch_needs_cpu(cpu) || nohz_ratelimit(cpu)) { + arch_needs_cpu(cpu)) { next_jiffies = last_jiffies + 1; delta_jiffies = 1; } else { -- cgit v1.1 From 68c38fc3cb4e5a60f502ee9c45f3dfe70e5165ad Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 15 Jul 2010 23:18:22 +0300 Subject: sched: No need for bootmem special cases As of commit dcce284 ("mm: Extend gfp masking to the page allocator") and commit 7e85ee0 ("slab,slub: don't enable interrupts during early boot"), the slab allocator makes sure we don't attempt to sleep during boot. Therefore, remove bootmem special cases from the scheduler and use plain GFP_KERNEL instead. 
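Schematically, the pattern being removed from these allocations (not the literal diff; variable names are illustrative):

	/* before: a 'bootmem' flag was threaded down just to pick the gfp mask */
	gfp_t gfp = bootmem ? GFP_NOWAIT : GFP_KERNEL;
	if (!alloc_cpumask_var(&mask, gfp))
		goto fail;

	/* after: gfp masking makes plain GFP_KERNEL safe even during early boot */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		goto fail;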
Signed-off-by: Pekka Enberg Cc: Peter Zijlstra LKML-Reference: <1279225102-2572-1-git-send-email-penberg@cs.helsinki.fi> Signed-off-by: Ingo Molnar --- kernel/sched.c | 19 +++++++------------ kernel/sched_cpupri.c | 8 ++------ kernel/sched_cpupri.h | 2 +- 3 files changed, 10 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 9064e7d..7b443ee 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6248,23 +6248,18 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) free_rootdomain(old_rd); } -static int init_rootdomain(struct root_domain *rd, bool bootmem) +static int init_rootdomain(struct root_domain *rd) { - gfp_t gfp = GFP_KERNEL; - memset(rd, 0, sizeof(*rd)); - if (bootmem) - gfp = GFP_NOWAIT; - - if (!alloc_cpumask_var(&rd->span, gfp)) + if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) goto out; - if (!alloc_cpumask_var(&rd->online, gfp)) + if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) goto free_span; - if (!alloc_cpumask_var(&rd->rto_mask, gfp)) + if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) goto free_online; - if (cpupri_init(&rd->cpupri, bootmem) != 0) + if (cpupri_init(&rd->cpupri) != 0) goto free_rto_mask; return 0; @@ -6280,7 +6275,7 @@ out: static void init_defrootdomain(void) { - init_rootdomain(&def_root_domain, true); + init_rootdomain(&def_root_domain); atomic_set(&def_root_domain.refcount, 1); } @@ -6293,7 +6288,7 @@ static struct root_domain *alloc_rootdomain(void) if (!rd) return NULL; - if (init_rootdomain(rd, false) != 0) { + if (init_rootdomain(rd) != 0) { kfree(rd); return NULL; } diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index e6871cb..2722dc1 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c @@ -166,14 +166,10 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) * * Returns: -ENOMEM if memory fails. */ -int cpupri_init(struct cpupri *cp, bool bootmem) +int cpupri_init(struct cpupri *cp) { - gfp_t gfp = GFP_KERNEL; int i; - if (bootmem) - gfp = GFP_NOWAIT; - memset(cp, 0, sizeof(*cp)); for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { @@ -181,7 +177,7 @@ int cpupri_init(struct cpupri *cp, bool bootmem) raw_spin_lock_init(&vec->lock); vec->count = 0; - if (!zalloc_cpumask_var(&vec->mask, gfp)) + if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) goto cleanup; } diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h index 7cb5bb6..9fc7d38 100644 --- a/kernel/sched_cpupri.h +++ b/kernel/sched_cpupri.h @@ -27,7 +27,7 @@ struct cpupri { int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask); void cpupri_set(struct cpupri *cp, int cpu, int pri); -int cpupri_init(struct cpupri *cp, bool bootmem); +int cpupri_init(struct cpupri *cp); void cpupri_cleanup(struct cpupri *cp); #else #define cpupri_set(cp, cpu, pri) do { } while (0) -- cgit v1.1 From 90133673395849c9d4e66a563f2d0d91d92aa461 Mon Sep 17 00:00:00 2001 From: Cesar Eduardo Barros Date: Mon, 7 Jun 2010 22:23:12 +0200 Subject: PM / Hibernate: Fix typos in comments in kernel/power/swap.c There are a few typos in kernel/power/swap.c. Fix them. Signed-off-by: Cesar Eduardo Barros Acked-by: Pavel Machek Signed-off-by: Rafael J. 
Wysocki --- kernel/power/swap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/power/swap.c b/kernel/power/swap.c index b0bb217..7c3ae83 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -32,7 +32,7 @@ /* * The swap map is a data structure used for keeping track of each page * written to a swap partition. It consists of many swap_map_page - * structures that contain each an array of MAP_PAGE_SIZE swap entries. + * structures that contain each an array of MAP_PAGE_ENTRIES swap entries. * These structures are stored on the swap and linked together with the * help of the .next_swap member. * @@ -148,7 +148,7 @@ sector_t alloc_swapdev_block(int swap) /** * free_all_swap_pages - free swap pages allocated for saving image data. - * It also frees the extents used to register which swap entres had been + * It also frees the extents used to register which swap entries had been * allocated. */ -- cgit v1.1 From c125e96f044427f38d106fab7bc5e4a5e6a18262 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 5 Jul 2010 22:43:53 +0200 Subject: PM: Make it possible to avoid races between wakeup and system sleep One of the arguments during the suspend blockers discussion was that the mainline kernel didn't contain any mechanisms making it possible to avoid races between wakeup and system suspend. Generally, there are two problems in that area. First, if a wakeup event occurs exactly when /sys/power/state is being written to, it may be delivered to user space right before the freezer kicks in, so the user space consumer of the event may not be able to process it before the system is suspended. Second, if a wakeup event occurs after user space has been frozen, it is not generally guaranteed that the ongoing transition of the system into a sleep state will be aborted. To address these issues introduce a new global sysfs attribute, /sys/power/wakeup_count, associated with a running counter of wakeup events and three helper functions, pm_stay_awake(), pm_relax(), and pm_wakeup_event(), that may be used by kernel subsystems to control the behavior of this attribute and to request the PM core to abort system transitions into a sleep state already in progress. The /sys/power/wakeup_count file may be read from or written to by user space. Reads will always succeed (unless interrupted by a signal) and return the current value of the wakeup events counter. Writes, however, will only succeed if the written number is equal to the current value of the wakeup events counter. If a write is successful, it will cause the kernel to save the current value of the wakeup events counter and to abort the subsequent system transition into a sleep state if any wakeup events are reported after the write has returned. [The assumption is that before writing to /sys/power/state user space will first read from /sys/power/wakeup_count. Next, user space consumers of wakeup events will have a chance to acknowledge or veto the upcoming system transition to a sleep state. Finally, if the transition is allowed to proceed, /sys/power/wakeup_count will be written to and if that succeeds, /sys/power/state will be written to as well. Still, if any wakeup events are reported to the PM core by kernel subsystems after that point, the transition will be aborted.] 
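To illustrate the sequence, a minimal user-space sketch (a hypothetical helper; error handling is trimmed and "mem" stands in for whatever sleep state is requested):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int suspend_with_wakeup_check(void)
{
	char count[32];
	ssize_t n;
	int fd;

	fd = open("/sys/power/wakeup_count", O_RDWR);
	if (fd < 0)
		return -1;
	n = read(fd, count, sizeof(count) - 1);    /* current wakeup event count */
	if (n <= 0) {
		close(fd);
		return -1;
	}
	count[n] = '\0';

	/* ... let wakeup event consumers acknowledge or veto the suspend ... */

	lseek(fd, 0, SEEK_SET);
	if (write(fd, count, strlen(count)) < 0) { /* fails if new events arrived */
		close(fd);
		return -1;
	}
	close(fd);

	fd = open("/sys/power/state", O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, "mem", 3);                   /* aborted by late wakeup events */
	close(fd);
	return n == 3 ? 0 : -1;
}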
Additionally, put a wakeup events counter into struct dev_pm_info and make these per-device wakeup event counters available via sysfs, so that it's possible to check the activity of various wakeup event sources within the kernel. To illustrate how subsystems can use pm_wakeup_event(), make the low-level PCI runtime PM wakeup-handling code use it. Signed-off-by: Rafael J. Wysocki Acked-by: Jesse Barnes Acked-by: Greg Kroah-Hartman Acked-by: markgross Reviewed-by: Alan Stern --- kernel/power/hibernate.c | 20 ++++++++++++------ kernel/power/main.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++ kernel/power/suspend.c | 4 +++- 3 files changed, 72 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index aa9e916..f612029 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -277,7 +277,7 @@ static int create_image(int platform_mode) goto Enable_irqs; } - if (hibernation_test(TEST_CORE)) + if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events()) goto Power_up; in_suspend = 1; @@ -288,8 +288,10 @@ static int create_image(int platform_mode) error); /* Restore control flow magically appears here */ restore_processor_state(); - if (!in_suspend) + if (!in_suspend) { + events_check_enabled = false; platform_leave(platform_mode); + } Power_up: sysdev_resume(); @@ -511,14 +513,20 @@ int hibernation_platform_enter(void) local_irq_disable(); sysdev_suspend(PMSG_HIBERNATE); + if (!pm_check_wakeup_events()) { + error = -EAGAIN; + goto Power_up; + } + hibernation_ops->enter(); /* We should never get here */ while (1); - /* - * We don't need to reenable the nonboot CPUs or resume consoles, since - * the system is going to be halted anyway. - */ + Power_up: + sysdev_resume(); + local_irq_enable(); + enable_nonboot_cpus(); + Platform_finish: hibernation_ops->finish(); diff --git a/kernel/power/main.c b/kernel/power/main.c index b58800b..62b0bc6 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -204,6 +204,60 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, power_attr(state); +#ifdef CONFIG_PM_SLEEP +/* + * The 'wakeup_count' attribute, along with the functions defined in + * drivers/base/power/wakeup.c, provides a means by which wakeup events can be + * handled in a non-racy way. + * + * If a wakeup event occurs when the system is in a sleep state, it simply is + * woken up. In turn, if an event that would wake the system up from a sleep + * state occurs when it is undergoing a transition to that sleep state, the + * transition should be aborted. Moreover, if such an event occurs when the + * system is in the working state, an attempt to start a transition to the + * given sleep state should fail during certain period after the detection of + * the event. Using the 'state' attribute alone is not sufficient to satisfy + * these requirements, because a wakeup event may occur exactly when 'state' + * is being written to and may be delivered to user space right before it is + * frozen, so the event will remain only partially processed until the system is + * woken up by another event. In particular, it won't cause the transition to + * a sleep state to be aborted. + * + * This difficulty may be overcome if user space uses 'wakeup_count' before + * writing to 'state'. It first should read from 'wakeup_count' and store + * the read value. 
Then, after carrying out its own preparations for the system + * transition to a sleep state, it should write the stored value to + * 'wakeup_count'. If that fails, at least one wakeup event has occured since + * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it + * is allowed to write to 'state', but the transition will be aborted if there + * are any wakeup events detected after 'wakeup_count' was written to. + */ + +static ssize_t wakeup_count_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + unsigned long val; + + return pm_get_wakeup_count(&val) ? sprintf(buf, "%lu\n", val) : -EINTR; +} + +static ssize_t wakeup_count_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t n) +{ + unsigned long val; + + if (sscanf(buf, "%lu", &val) == 1) { + if (pm_save_wakeup_count(val)) + return n; + } + return -EINVAL; +} + +power_attr(wakeup_count); +#endif /* CONFIG_PM_SLEEP */ + #ifdef CONFIG_PM_TRACE int pm_trace_enabled; @@ -236,6 +290,7 @@ static struct attribute * g[] = { #endif #ifdef CONFIG_PM_SLEEP &pm_async_attr.attr, + &wakeup_count_attr.attr, #ifdef CONFIG_PM_DEBUG &pm_test_attr.attr, #endif diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index f37cb7d..5f8d09f 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -163,8 +163,10 @@ static int suspend_enter(suspend_state_t state) error = sysdev_suspend(PMSG_SUSPEND); if (!error) { - if (!suspend_test(TEST_CORE)) + if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) { error = suspend_ops->enter(state); + events_check_enabled = false; + } sysdev_resume(); } -- cgit v1.1 From 5f279845f9d684661563894d44729a0c706375b4 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Mon, 19 Jul 2010 02:00:18 +0200 Subject: pm_qos: Reimplement using plists A lot of the pm_qos extremal value handling is really duplicating what a priority ordered list does, just in a less efficient fashion. Simply redoing the implementation in terms of a plist gets rid of a lot of this junk (although there are several other strange things that could do with tidying up, like pm_qos_request_list has to carry the pm_qos_class with every node, simply because it doesn't get passed in to pm_qos_update_request even though every caller knows full well what parameter it's updating). I think this redo is a win independent of android, so we should do something like this now. Signed-off-by: James Bottomley Signed-off-by: mark gross Signed-off-by: Rafael J. Wysocki --- kernel/pm_qos_params.c | 172 ++++++++++++++++++++++++------------------------- 1 file changed, 86 insertions(+), 86 deletions(-) (limited to 'kernel') diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index f42d3f7..db8e51d 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c @@ -30,6 +30,7 @@ /*#define DEBUG*/ #include +#include #include #include #include @@ -49,58 +50,53 @@ * held, taken with _irqsave. 
One lock to rule them all */ struct pm_qos_request_list { - struct list_head list; - union { - s32 value; - s32 usec; - s32 kbps; - }; + struct plist_node list; int pm_qos_class; }; -static s32 max_compare(s32 v1, s32 v2); -static s32 min_compare(s32 v1, s32 v2); +enum pm_qos_type { + PM_QOS_MAX, /* return the largest value */ + PM_QOS_MIN /* return the smallest value */ +}; struct pm_qos_object { - struct pm_qos_request_list requests; + struct plist_head requests; struct blocking_notifier_head *notifiers; struct miscdevice pm_qos_power_miscdev; char *name; s32 default_value; - atomic_t target_value; - s32 (*comparitor)(s32, s32); + enum pm_qos_type type; }; +static DEFINE_SPINLOCK(pm_qos_lock); + static struct pm_qos_object null_pm_qos; static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); static struct pm_qos_object cpu_dma_pm_qos = { - .requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)}, + .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock), .notifiers = &cpu_dma_lat_notifier, .name = "cpu_dma_latency", .default_value = 2000 * USEC_PER_SEC, - .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), - .comparitor = min_compare + .type = PM_QOS_MIN, }; static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); static struct pm_qos_object network_lat_pm_qos = { - .requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)}, + .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock), .notifiers = &network_lat_notifier, .name = "network_latency", .default_value = 2000 * USEC_PER_SEC, - .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), - .comparitor = min_compare + .type = PM_QOS_MIN }; static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); static struct pm_qos_object network_throughput_pm_qos = { - .requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)}, + .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock), .notifiers = &network_throughput_notifier, .name = "network_throughput", .default_value = 0, - .target_value = ATOMIC_INIT(0), - .comparitor = max_compare + .type = PM_QOS_MAX, }; @@ -111,8 +107,6 @@ static struct pm_qos_object *pm_qos_array[] = { &network_throughput_pm_qos }; -static DEFINE_SPINLOCK(pm_qos_lock); - static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos); static int pm_qos_power_open(struct inode *inode, struct file *filp); @@ -124,46 +118,55 @@ static const struct file_operations pm_qos_power_fops = { .release = pm_qos_power_release, }; -/* static helper functions */ -static s32 max_compare(s32 v1, s32 v2) +/* unlocked internal variant */ +static inline int pm_qos_get_value(struct pm_qos_object *o) { - return max(v1, v2); -} + if (plist_head_empty(&o->requests)) + return o->default_value; -static s32 min_compare(s32 v1, s32 v2) -{ - return min(v1, v2); -} + switch (o->type) { + case PM_QOS_MIN: + return plist_last(&o->requests)->prio; + case PM_QOS_MAX: + return plist_first(&o->requests)->prio; -static void update_target(int pm_qos_class) + default: + /* runtime check for not using enum */ + BUG(); + } +} + +static void update_target(struct pm_qos_object *o, struct plist_node *node, + int del, int value) { - s32 extreme_value; - struct pm_qos_request_list *node; unsigned long flags; - int call_notifier = 0; + int prev_value, curr_value; spin_lock_irqsave(&pm_qos_lock, flags); - extreme_value = pm_qos_array[pm_qos_class]->default_value; - list_for_each_entry(node, - &pm_qos_array[pm_qos_class]->requests.list, list) { - extreme_value = 
pm_qos_array[pm_qos_class]->comparitor( - extreme_value, node->value); - } - if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) != - extreme_value) { - call_notifier = 1; - atomic_set(&pm_qos_array[pm_qos_class]->target_value, - extreme_value); - pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class, - atomic_read(&pm_qos_array[pm_qos_class]->target_value)); + prev_value = pm_qos_get_value(o); + /* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */ + if (value != PM_QOS_DEFAULT_VALUE) { + /* + * to change the list, we atomically remove, reinit + * with new value and add, then see if the extremal + * changed + */ + plist_del(node, &o->requests); + plist_node_init(node, value); + plist_add(node, &o->requests); + } else if (del) { + plist_del(node, &o->requests); + } else { + plist_add(node, &o->requests); } + curr_value = pm_qos_get_value(o); spin_unlock_irqrestore(&pm_qos_lock, flags); - if (call_notifier) - blocking_notifier_call_chain( - pm_qos_array[pm_qos_class]->notifiers, - (unsigned long) extreme_value, NULL); + if (prev_value != curr_value) + blocking_notifier_call_chain(o->notifiers, + (unsigned long)curr_value, + NULL); } static int register_pm_qos_misc(struct pm_qos_object *qos) @@ -196,7 +199,14 @@ static int find_pm_qos_object_by_minor(int minor) */ int pm_qos_request(int pm_qos_class) { - return atomic_read(&pm_qos_array[pm_qos_class]->target_value); + unsigned long flags; + int value; + + spin_lock_irqsave(&pm_qos_lock, flags); + value = pm_qos_get_value(pm_qos_array[pm_qos_class]); + spin_unlock_irqrestore(&pm_qos_lock, flags); + + return value; } EXPORT_SYMBOL_GPL(pm_qos_request); @@ -214,21 +224,19 @@ EXPORT_SYMBOL_GPL(pm_qos_request); struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value) { struct pm_qos_request_list *dep; - unsigned long flags; dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL); if (dep) { + struct pm_qos_object *o = pm_qos_array[pm_qos_class]; + int new_value; + if (value == PM_QOS_DEFAULT_VALUE) - dep->value = pm_qos_array[pm_qos_class]->default_value; + new_value = o->default_value; else - dep->value = value; + new_value = value; + plist_node_init(&dep->list, new_value); dep->pm_qos_class = pm_qos_class; - - spin_lock_irqsave(&pm_qos_lock, flags); - list_add(&dep->list, - &pm_qos_array[pm_qos_class]->requests.list); - spin_unlock_irqrestore(&pm_qos_lock, flags); - update_target(pm_qos_class); + update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); } return dep; @@ -246,27 +254,23 @@ EXPORT_SYMBOL_GPL(pm_qos_add_request); * Attempts are made to make this code callable on hot code paths. 
*/ void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, - s32 new_value) + s32 new_value) { - unsigned long flags; - int pending_update = 0; s32 temp; + struct pm_qos_object *o; - if (pm_qos_req) { /*guard against callers passing in null */ - spin_lock_irqsave(&pm_qos_lock, flags); - if (new_value == PM_QOS_DEFAULT_VALUE) - temp = pm_qos_array[pm_qos_req->pm_qos_class]->default_value; - else - temp = new_value; - - if (temp != pm_qos_req->value) { - pending_update = 1; - pm_qos_req->value = temp; - } - spin_unlock_irqrestore(&pm_qos_lock, flags); - if (pending_update) - update_target(pm_qos_req->pm_qos_class); - } + if (!pm_qos_req) /*guard against callers passing in null */ + return; + + o = pm_qos_array[pm_qos_req->pm_qos_class]; + + if (new_value == PM_QOS_DEFAULT_VALUE) + temp = o->default_value; + else + temp = new_value; + + if (temp != pm_qos_req->list.prio) + update_target(o, &pm_qos_req->list, 0, temp); } EXPORT_SYMBOL_GPL(pm_qos_update_request); @@ -280,19 +284,15 @@ EXPORT_SYMBOL_GPL(pm_qos_update_request); */ void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) { - unsigned long flags; - int qos_class; + struct pm_qos_object *o; if (pm_qos_req == NULL) return; /* silent return to keep pcm code cleaner */ - qos_class = pm_qos_req->pm_qos_class; - spin_lock_irqsave(&pm_qos_lock, flags); - list_del(&pm_qos_req->list); + o = pm_qos_array[pm_qos_req->pm_qos_class]; + update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE); kfree(pm_qos_req); - spin_unlock_irqrestore(&pm_qos_lock, flags); - update_target(qos_class); } EXPORT_SYMBOL_GPL(pm_qos_remove_request); -- cgit v1.1 From 82f682514a5df89ffb3890627eebf0897b7a84ec Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Mon, 5 Jul 2010 22:53:06 +0200 Subject: pm_qos: Get rid of the allocation in pm_qos_add_request() All current users of pm_qos_add_request() have the ability to supply the memory required by the pm_qos routines, so make them do this and eliminate the kmalloc() with pm_qos_add_request(). This has the double benefit of making the call never fail and allowing it to be called from atomic context. Signed-off-by: James Bottomley Signed-off-by: mark gross Signed-off-by: Rafael J. Wysocki --- kernel/pm_qos_params.c | 67 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 40 insertions(+), 27 deletions(-) (limited to 'kernel') diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index db8e51d..996a4de 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c @@ -30,7 +30,6 @@ /*#define DEBUG*/ #include -#include #include #include #include @@ -49,11 +48,6 @@ * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock * held, taken with _irqsave. One lock to rule them all */ -struct pm_qos_request_list { - struct plist_node list; - int pm_qos_class; -}; - enum pm_qos_type { PM_QOS_MAX, /* return the largest value */ PM_QOS_MIN /* return the smallest value */ @@ -210,6 +204,12 @@ int pm_qos_request(int pm_qos_class) } EXPORT_SYMBOL_GPL(pm_qos_request); +int pm_qos_request_active(struct pm_qos_request_list *req) +{ + return req->pm_qos_class != 0; +} +EXPORT_SYMBOL_GPL(pm_qos_request_active); + /** * pm_qos_add_request - inserts new qos request into the list * @pm_qos_class: identifies which list of qos request to us @@ -221,25 +221,23 @@ EXPORT_SYMBOL_GPL(pm_qos_request); * element as a handle for use in updating and removal. Call needs to save * this handle for later use. 
*/ -struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value) +void pm_qos_add_request(struct pm_qos_request_list *dep, + int pm_qos_class, s32 value) { - struct pm_qos_request_list *dep; - - dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL); - if (dep) { - struct pm_qos_object *o = pm_qos_array[pm_qos_class]; - int new_value; - - if (value == PM_QOS_DEFAULT_VALUE) - new_value = o->default_value; - else - new_value = value; - plist_node_init(&dep->list, new_value); - dep->pm_qos_class = pm_qos_class; - update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); - } + struct pm_qos_object *o = pm_qos_array[pm_qos_class]; + int new_value; - return dep; + if (pm_qos_request_active(dep)) { + WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n"); + return; + } + if (value == PM_QOS_DEFAULT_VALUE) + new_value = o->default_value; + else + new_value = value; + plist_node_init(&dep->list, new_value); + dep->pm_qos_class = pm_qos_class; + update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); } EXPORT_SYMBOL_GPL(pm_qos_add_request); @@ -262,6 +260,11 @@ void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, if (!pm_qos_req) /*guard against callers passing in null */ return; + if (!pm_qos_request_active(pm_qos_req)) { + WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n"); + return; + } + o = pm_qos_array[pm_qos_req->pm_qos_class]; if (new_value == PM_QOS_DEFAULT_VALUE) @@ -290,9 +293,14 @@ void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) return; /* silent return to keep pcm code cleaner */ + if (!pm_qos_request_active(pm_qos_req)) { + WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n"); + return; + } + o = pm_qos_array[pm_qos_req->pm_qos_class]; update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE); - kfree(pm_qos_req); + memset(pm_qos_req, 0, sizeof(*pm_qos_req)); } EXPORT_SYMBOL_GPL(pm_qos_remove_request); @@ -340,8 +348,12 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp) pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); if (pm_qos_class >= 0) { - filp->private_data = (void *) pm_qos_add_request(pm_qos_class, - PM_QOS_DEFAULT_VALUE); + struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req)); + if (!req) + return -ENOMEM; + + pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE); + filp->private_data = req; if (filp->private_data) return 0; @@ -353,8 +365,9 @@ static int pm_qos_power_release(struct inode *inode, struct file *filp) { struct pm_qos_request_list *req; - req = (struct pm_qos_request_list *)filp->private_data; + req = filp->private_data; pm_qos_remove_request(req); + kfree(req); return 0; } -- cgit v1.1 From f6f71f187518477cecc01cd887933b5da19585e6 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 7 Jul 2010 23:43:18 +0200 Subject: PM / Hibernate: Fix hibernation_platform_enter() The hibernation_platform_enter() function calls dpm_suspend_noirq() instead of dpm_resume_noirq() by mistake. Fix this. Signed-off-by: Rafael J. 
Wysocki Acked-by: Len Brown --- kernel/power/hibernate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index f612029..d97ba86 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -530,7 +530,7 @@ int hibernation_platform_enter(void) Platform_finish: hibernation_ops->finish(); - dpm_suspend_noirq(PMSG_RESTORE); + dpm_resume_noirq(PMSG_RESTORE); Resume_devices: entering_platform_hibernation = false; -- cgit v1.1 From d074ee023fa3a4681b64223c5e636102c39628c4 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 7 Jul 2010 23:43:35 +0200 Subject: PM / Hibernate: Fix snapshot error code path There is an inconsistency between hibernation_platform_enter() and hibernation_snapshot(), because the latter calls hibernation_ops->end() after failing hibernation_ops->begin(), while the former doesn't do that. Make hibernation_snapshot() behave in the same way as hibernation_platform_enter() in that respect. Signed-off-by: Rafael J. Wysocki Acked-by: Len Brown --- kernel/power/hibernate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index d97ba86..d26f04e 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -330,7 +330,7 @@ int hibernation_snapshot(int platform_mode) error = platform_begin(platform_mode); if (error) - return error; + goto Close; /* Preallocate image memory before shutting down devices. */ error = hibernate_preallocate_memory(); -- cgit v1.1 From ce4410116c5debfb0e049f5db4b5cd6211e05b80 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 7 Jul 2010 23:43:45 +0200 Subject: PM / Suspend: Fix ordering of calls in suspend error paths The ACPI suspend code calls suspend_nvs_free() at a wrong place, which may lead to a memory leak if there's an error executing acpi_pm_prepare(), because acpi_pm_finish() will not be called in that case. However, the root cause of this problem is the apparently confusing ordering of calls in suspend error paths that needs to be fixed. In addition to that, fix a typo in a label name in suspend.c. Signed-off-by: Rafael J. 
Wysocki Acked-by: Len Brown --- kernel/power/suspend.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 5f8d09f..7335952 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -136,19 +136,19 @@ static int suspend_enter(suspend_state_t state) if (suspend_ops->prepare) { error = suspend_ops->prepare(); if (error) - return error; + goto Platform_finish; } error = dpm_suspend_noirq(PMSG_SUSPEND); if (error) { printk(KERN_ERR "PM: Some devices failed to power down\n"); - goto Platfrom_finish; + goto Platform_finish; } if (suspend_ops->prepare_late) { error = suspend_ops->prepare_late(); if (error) - goto Power_up_devices; + goto Platform_wake; } if (suspend_test(TEST_PLATFORM)) @@ -180,10 +180,9 @@ static int suspend_enter(suspend_state_t state) if (suspend_ops->wake) suspend_ops->wake(); - Power_up_devices: dpm_resume_noirq(PMSG_RESUME); - Platfrom_finish: + Platform_finish: if (suspend_ops->finish) suspend_ops->finish(); -- cgit v1.1 From e15bacbebb9dcc95f148f28dfc83a6d5e48b60b8 Mon Sep 17 00:00:00 2001 From: Dan Kruchinin Date: Wed, 14 Jul 2010 14:31:57 +0400 Subject: padata: Make two separate cpumasks The aim of this patch is to make two separate cpumasks for padata parallel and serial workers respectively. It allows user to make more thin and sophisticated configurations of padata framework. For example user may bind parallel and serial workers to non-intersecting CPU groups to gain better performance. Also each padata instance has notifiers chain for its cpumasks now. If either parallel or serial or both masks were changed all interested subsystems will get notification about that. It's especially useful if padata user uses algorithm for callback CPU selection according to serial cpumask. Signed-off-by: Dan Kruchinin Signed-off-by: Herbert Xu --- kernel/padata.c | 471 +++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 347 insertions(+), 124 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 450d67d..84d0ca9 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -35,9 +35,9 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) { int cpu, target_cpu; - target_cpu = cpumask_first(pd->cpumask); + target_cpu = cpumask_first(pd->cpumask.pcpu); for (cpu = 0; cpu < cpu_index; cpu++) - target_cpu = cpumask_next(target_cpu, pd->cpumask); + target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); return target_cpu; } @@ -53,26 +53,27 @@ static int padata_cpu_hash(struct padata_priv *padata) * Hash the sequence numbers to the cpus by taking * seq_nr mod. number of cpus in use. 
*/ - cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask); + cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu); return padata_index_to_cpu(pd, cpu_index); } -static void padata_parallel_worker(struct work_struct *work) +static void padata_parallel_worker(struct work_struct *parallel_work) { - struct padata_queue *queue; + struct padata_parallel_queue *pqueue; struct parallel_data *pd; struct padata_instance *pinst; LIST_HEAD(local_list); local_bh_disable(); - queue = container_of(work, struct padata_queue, pwork); - pd = queue->pd; + pqueue = container_of(parallel_work, + struct padata_parallel_queue, work); + pd = pqueue->pd; pinst = pd->pinst; - spin_lock(&queue->parallel.lock); - list_replace_init(&queue->parallel.list, &local_list); - spin_unlock(&queue->parallel.lock); + spin_lock(&pqueue->parallel.lock); + list_replace_init(&pqueue->parallel.list, &local_list); + spin_unlock(&pqueue->parallel.lock); while (!list_empty(&local_list)) { struct padata_priv *padata; @@ -94,7 +95,7 @@ static void padata_parallel_worker(struct work_struct *work) * @pinst: padata instance * @padata: object to be parallelized * @cb_cpu: cpu the serialization callback function will run on, - * must be in the cpumask of padata. + * must be in the serial cpumask of padata(i.e. cpumask.cbcpu). * * The parallelization callback function will run with BHs off. * Note: Every object which is parallelized by padata_do_parallel @@ -104,7 +105,7 @@ int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu) { int target_cpu, err; - struct padata_queue *queue; + struct padata_parallel_queue *queue; struct parallel_data *pd; rcu_read_lock_bh(); @@ -115,7 +116,7 @@ int padata_do_parallel(struct padata_instance *pinst, if (!(pinst->flags & PADATA_INIT)) goto out; - if (!cpumask_test_cpu(cb_cpu, pd->cpumask)) + if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) goto out; err = -EBUSY; @@ -136,13 +137,13 @@ int padata_do_parallel(struct padata_instance *pinst, padata->seq_nr = atomic_inc_return(&pd->seq_nr); target_cpu = padata_cpu_hash(padata); - queue = per_cpu_ptr(pd->queue, target_cpu); + queue = per_cpu_ptr(pd->pqueue, target_cpu); spin_lock(&queue->parallel.lock); list_add_tail(&padata->list, &queue->parallel.list); spin_unlock(&queue->parallel.lock); - queue_work_on(target_cpu, pinst->wq, &queue->pwork); + queue_work_on(target_cpu, pinst->wq, &queue->work); out: rcu_read_unlock_bh(); @@ -172,11 +173,11 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) { int cpu, num_cpus; int next_nr, next_index; - struct padata_queue *queue, *next_queue; + struct padata_parallel_queue *queue, *next_queue; struct padata_priv *padata; struct padata_list *reorder; - num_cpus = cpumask_weight(pd->cpumask); + num_cpus = cpumask_weight(pd->cpumask.pcpu); /* * Calculate the percpu reorder queue and the sequence @@ -185,13 +186,13 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) next_nr = pd->processed; next_index = next_nr % num_cpus; cpu = padata_index_to_cpu(pd, next_index); - next_queue = per_cpu_ptr(pd->queue, cpu); + next_queue = per_cpu_ptr(pd->pqueue, cpu); if (unlikely(next_nr > pd->max_seq_nr)) { next_nr = next_nr - pd->max_seq_nr - 1; next_index = next_nr % num_cpus; cpu = padata_index_to_cpu(pd, next_index); - next_queue = per_cpu_ptr(pd->queue, cpu); + next_queue = per_cpu_ptr(pd->pqueue, cpu); pd->processed = 0; } @@ -215,7 +216,7 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) goto out; } - queue = 
per_cpu_ptr(pd->queue, smp_processor_id()); + queue = per_cpu_ptr(pd->pqueue, smp_processor_id()); if (queue->cpu_index == next_queue->cpu_index) { padata = ERR_PTR(-ENODATA); goto out; @@ -229,7 +230,7 @@ out: static void padata_reorder(struct parallel_data *pd) { struct padata_priv *padata; - struct padata_queue *queue; + struct padata_serial_queue *squeue; struct padata_instance *pinst = pd->pinst; /* @@ -268,13 +269,13 @@ static void padata_reorder(struct parallel_data *pd) return; } - queue = per_cpu_ptr(pd->queue, padata->cb_cpu); + squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu); - spin_lock(&queue->serial.lock); - list_add_tail(&padata->list, &queue->serial.list); - spin_unlock(&queue->serial.lock); + spin_lock(&squeue->serial.lock); + list_add_tail(&padata->list, &squeue->serial.list); + spin_unlock(&squeue->serial.lock); - queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork); + queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work); } spin_unlock_bh(&pd->lock); @@ -300,19 +301,19 @@ static void padata_reorder_timer(unsigned long arg) padata_reorder(pd); } -static void padata_serial_worker(struct work_struct *work) +static void padata_serial_worker(struct work_struct *serial_work) { - struct padata_queue *queue; + struct padata_serial_queue *squeue; struct parallel_data *pd; LIST_HEAD(local_list); local_bh_disable(); - queue = container_of(work, struct padata_queue, swork); - pd = queue->pd; + squeue = container_of(serial_work, struct padata_serial_queue, work); + pd = squeue->pd; - spin_lock(&queue->serial.lock); - list_replace_init(&queue->serial.list, &local_list); - spin_unlock(&queue->serial.lock); + spin_lock(&squeue->serial.lock); + list_replace_init(&squeue->serial.list, &local_list); + spin_unlock(&squeue->serial.lock); while (!list_empty(&local_list)) { struct padata_priv *padata; @@ -339,18 +340,18 @@ static void padata_serial_worker(struct work_struct *work) void padata_do_serial(struct padata_priv *padata) { int cpu; - struct padata_queue *queue; + struct padata_parallel_queue *pqueue; struct parallel_data *pd; pd = padata->pd; cpu = get_cpu(); - queue = per_cpu_ptr(pd->queue, cpu); + pqueue = per_cpu_ptr(pd->pqueue, cpu); - spin_lock(&queue->reorder.lock); + spin_lock(&pqueue->reorder.lock); atomic_inc(&pd->reorder_objects); - list_add_tail(&padata->list, &queue->reorder.list); - spin_unlock(&queue->reorder.lock); + list_add_tail(&padata->list, &pqueue->reorder.list); + spin_unlock(&pqueue->reorder.lock); put_cpu(); @@ -358,51 +359,88 @@ void padata_do_serial(struct padata_priv *padata) } EXPORT_SYMBOL(padata_do_serial); -/* Allocate and initialize the internal cpumask dependend resources. 
*/ -static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, - const struct cpumask *cpumask) +static int padata_setup_cpumasks(struct parallel_data *pd, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) { - int cpu, cpu_index, num_cpus; - struct padata_queue *queue; - struct parallel_data *pd; + if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) + return -ENOMEM; - cpu_index = 0; + cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask); + if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { + free_cpumask_var(pd->cpumask.cbcpu); + return -ENOMEM; + } - pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); - if (!pd) - goto err; + cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask); + return 0; +} - pd->queue = alloc_percpu(struct padata_queue); - if (!pd->queue) - goto err_free_pd; +static void __padata_list_init(struct padata_list *pd_list) +{ + INIT_LIST_HEAD(&pd_list->list); + spin_lock_init(&pd_list->lock); +} - if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL)) - goto err_free_queue; +/* Initialize all percpu queues used by serial workers */ +static void padata_init_squeues(struct parallel_data *pd) +{ + int cpu; + struct padata_serial_queue *squeue; - cpumask_and(pd->cpumask, cpumask, cpu_active_mask); + for_each_cpu(cpu, pd->cpumask.cbcpu) { + squeue = per_cpu_ptr(pd->squeue, cpu); + squeue->pd = pd; + __padata_list_init(&squeue->serial); + INIT_WORK(&squeue->work, padata_serial_worker); + } +} - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); +/* Initialize all percpu queues used by parallel workers */ +static void padata_init_pqueues(struct parallel_data *pd) +{ + int cpu_index, num_cpus, cpu; + struct padata_parallel_queue *pqueue; - queue->pd = pd; + cpu_index = 0; + for_each_cpu(cpu, pd->cpumask.pcpu) { + pqueue = per_cpu_ptr(pd->pqueue, cpu); + pqueue->pd = pd; + pqueue->cpu_index = cpu_index; + + __padata_list_init(&pqueue->reorder); + __padata_list_init(&pqueue->parallel); + INIT_WORK(&pqueue->work, padata_parallel_worker); + atomic_set(&pqueue->num_obj, 0); + } - queue->cpu_index = cpu_index; - cpu_index++; + num_cpus = cpumask_weight(pd->cpumask.pcpu); + pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; +} - INIT_LIST_HEAD(&queue->reorder.list); - INIT_LIST_HEAD(&queue->parallel.list); - INIT_LIST_HEAD(&queue->serial.list); - spin_lock_init(&queue->reorder.lock); - spin_lock_init(&queue->parallel.lock); - spin_lock_init(&queue->serial.lock); +/* Allocate and initialize the internal cpumask dependend resources. 
*/ +static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) +{ + struct parallel_data *pd; - INIT_WORK(&queue->pwork, padata_parallel_worker); - INIT_WORK(&queue->swork, padata_serial_worker); - } + pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); + if (!pd) + goto err; - num_cpus = cpumask_weight(pd->cpumask); - pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; + pd->pqueue = alloc_percpu(struct padata_parallel_queue); + if (!pd->pqueue) + goto err_free_pd; + + pd->squeue = alloc_percpu(struct padata_serial_queue); + if (!pd->squeue) + goto err_free_pqueue; + if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) + goto err_free_squeue; + padata_init_pqueues(pd); + padata_init_squeues(pd); setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); atomic_set(&pd->seq_nr, -1); atomic_set(&pd->reorder_objects, 0); @@ -412,8 +450,10 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, return pd; -err_free_queue: - free_percpu(pd->queue); +err_free_squeue: + free_percpu(pd->squeue); +err_free_pqueue: + free_percpu(pd->pqueue); err_free_pd: kfree(pd); err: @@ -422,8 +462,10 @@ err: static void padata_free_pd(struct parallel_data *pd) { - free_cpumask_var(pd->cpumask); - free_percpu(pd->queue); + free_cpumask_var(pd->cpumask.pcpu); + free_cpumask_var(pd->cpumask.cbcpu); + free_percpu(pd->pqueue); + free_percpu(pd->squeue); kfree(pd); } @@ -431,11 +473,12 @@ static void padata_free_pd(struct parallel_data *pd) static void padata_flush_queues(struct parallel_data *pd) { int cpu; - struct padata_queue *queue; + struct padata_parallel_queue *pqueue; + struct padata_serial_queue *squeue; - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - flush_work(&queue->pwork); + for_each_cpu(cpu, pd->cpumask.pcpu) { + pqueue = per_cpu_ptr(pd->pqueue, cpu); + flush_work(&pqueue->work); } del_timer_sync(&pd->timer); @@ -443,9 +486,9 @@ static void padata_flush_queues(struct parallel_data *pd) if (atomic_read(&pd->reorder_objects)) padata_reorder(pd); - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - flush_work(&queue->swork); + for_each_cpu(cpu, pd->cpumask.cbcpu) { + squeue = per_cpu_ptr(pd->squeue, cpu); + flush_work(&squeue->work); } BUG_ON(atomic_read(&pd->refcnt) != 0); @@ -475,21 +518,63 @@ static void padata_replace(struct padata_instance *pinst, struct parallel_data *pd_new) { struct parallel_data *pd_old = pinst->pd; + int notification_mask = 0; pinst->flags |= PADATA_RESET; rcu_assign_pointer(pinst->pd, pd_new); synchronize_rcu(); + if (!pd_old) + goto out; - if (pd_old) { - padata_flush_queues(pd_old); - padata_free_pd(pd_old); - } + padata_flush_queues(pd_old); + if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu)) + notification_mask |= PADATA_CPU_PARALLEL; + if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu)) + notification_mask |= PADATA_CPU_SERIAL; + + padata_free_pd(pd_old); + if (notification_mask) + blocking_notifier_call_chain(&pinst->cpumask_change_notifier, + notification_mask, pinst); +out: pinst->flags &= ~PADATA_RESET; } +/** + * padata_register_cpumask_notifier - Registers a notifier that will be called + * if either pcpu or cbcpu or both cpumasks change. + * + * @pinst: A poineter to padata instance + * @nblock: A pointer to notifier block. 
+ */ +int padata_register_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock) +{ + return blocking_notifier_chain_register(&pinst->cpumask_change_notifier, + nblock); +} +EXPORT_SYMBOL(padata_register_cpumask_notifier); + +/** + * padata_unregister_cpumask_notifier - Unregisters cpumask notifier + * registered earlier using padata_register_cpumask_notifier + * + * @pinst: A pointer to data instance. + * @nlock: A pointer to notifier block. + */ +int padata_unregister_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock) +{ + return blocking_notifier_chain_unregister( + &pinst->cpumask_change_notifier, + nblock); +} +EXPORT_SYMBOL(padata_unregister_cpumask_notifier); + + /* If cpumask contains no active cpu, we mark the instance as invalid. */ static bool padata_validate_cpumask(struct padata_instance *pinst, const struct cpumask *cpumask) @@ -504,13 +589,82 @@ static bool padata_validate_cpumask(struct padata_instance *pinst, } /** - * padata_set_cpumask - set the cpumask that padata should use + * padata_get_cpumask: Fetch serial or parallel cpumask from the + * given padata instance and copy it to @out_mask + * + * @pinst: A pointer to padata instance + * @cpumask_type: Specifies which cpumask will be copied. + * Possible values are PADATA_CPU_SERIAL *or* PADATA_CPU_PARALLEL + * corresponding to serial and parallel cpumask respectively. + * @out_mask: A pointer to cpumask structure where selected + * cpumask will be copied. + */ +int padata_get_cpumask(struct padata_instance *pinst, + int cpumask_type, struct cpumask *out_mask) +{ + struct parallel_data *pd; + int ret = 0; + + rcu_read_lock_bh(); + pd = rcu_dereference(pinst->pd); + switch (cpumask_type) { + case PADATA_CPU_SERIAL: + cpumask_copy(out_mask, pd->cpumask.cbcpu); + break; + case PADATA_CPU_PARALLEL: + cpumask_copy(out_mask, pd->cpumask.pcpu); + break; + default: + ret = -EINVAL; + } + + rcu_read_unlock_bh(); + return ret; +} +EXPORT_SYMBOL(padata_get_cpumask); + +/** + * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value + * equivalent to @cpumask. * * @pinst: padata instance + * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding + * to parallel and serial cpumasks respectively. * @cpumask: the cpumask to use */ -int padata_set_cpumask(struct padata_instance *pinst, - cpumask_var_t cpumask) +int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, + cpumask_var_t cpumask) +{ + struct cpumask *serial_mask, *parallel_mask; + + switch (cpumask_type) { + case PADATA_CPU_PARALLEL: + serial_mask = pinst->cpumask.cbcpu; + parallel_mask = cpumask; + break; + case PADATA_CPU_SERIAL: + parallel_mask = pinst->cpumask.pcpu; + serial_mask = cpumask; + break; + default: + return -EINVAL; + } + + return __padata_set_cpumasks(pinst, parallel_mask, serial_mask); +} +EXPORT_SYMBOL(padata_set_cpumask); + +/** + * __padata_set_cpumasks - Set both parallel and serial cpumasks. The first + * one is used by parallel workers and the second one + * by the wokers doing serialization. 
+ * + * @pinst: padata instance + * @pcpumask: the cpumask to use for parallel workers + * @cbcpumask: the cpumsak to use for serial workers + */ +int __padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, cpumask_var_t cbcpumask) { int valid; int err = 0; @@ -518,7 +672,13 @@ int padata_set_cpumask(struct padata_instance *pinst, mutex_lock(&pinst->lock); - valid = padata_validate_cpumask(pinst, cpumask); + valid = padata_validate_cpumask(pinst, pcpumask); + if (!valid) { + __padata_stop(pinst); + goto out_replace; + } + + valid = padata_validate_cpumask(pinst, cbcpumask); if (!valid) { __padata_stop(pinst); goto out_replace; @@ -526,14 +686,15 @@ int padata_set_cpumask(struct padata_instance *pinst, get_online_cpus(); - pd = padata_alloc_pd(pinst, cpumask); + pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); if (!pd) { err = -ENOMEM; goto out; } out_replace: - cpumask_copy(pinst->cpumask, cpumask); + cpumask_copy(pinst->cpumask.pcpu, pcpumask); + cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); padata_replace(pinst, pd); @@ -546,41 +707,57 @@ out: mutex_unlock(&pinst->lock); return err; + } -EXPORT_SYMBOL(padata_set_cpumask); +EXPORT_SYMBOL(__padata_set_cpumasks); static int __padata_add_cpu(struct padata_instance *pinst, int cpu) { struct parallel_data *pd; if (cpumask_test_cpu(cpu, cpu_active_mask)) { - pd = padata_alloc_pd(pinst, pinst->cpumask); + pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, + pinst->cpumask.cbcpu); if (!pd) return -ENOMEM; padata_replace(pinst, pd); - if (padata_validate_cpumask(pinst, pinst->cpumask)) + if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && + padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) __padata_start(pinst); } return 0; } -/** - * padata_add_cpu - add a cpu to the padata cpumask + /** + * padata_add_cpu - add a cpu to one or both(parallel and serial) + * padata cpumasks. * * @pinst: padata instance * @cpu: cpu to add + * @mask: bitmask of flags specifying to which cpumask @cpu shuld be added. + * The @mask may be any combination of the following flags: + * PADATA_CPU_SERIAL - serial cpumask + * PADATA_CPU_PARALLEL - parallel cpumask */ -int padata_add_cpu(struct padata_instance *pinst, int cpu) + +int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask) { int err; + if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) + return -EINVAL; + mutex_lock(&pinst->lock); get_online_cpus(); - cpumask_set_cpu(cpu, pinst->cpumask); + if (mask & PADATA_CPU_SERIAL) + cpumask_set_cpu(cpu, pinst->cpumask.cbcpu); + if (mask & PADATA_CPU_PARALLEL) + cpumask_set_cpu(cpu, pinst->cpumask.pcpu); + err = __padata_add_cpu(pinst, cpu); put_online_cpus(); @@ -596,13 +773,15 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) if (cpumask_test_cpu(cpu, cpu_online_mask)) { - if (!padata_validate_cpumask(pinst, pinst->cpumask)) { + if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || + !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) { __padata_stop(pinst); padata_replace(pinst, pd); goto out; } - pd = padata_alloc_pd(pinst, pinst->cpumask); + pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, + pinst->cpumask.cbcpu); if (!pd) return -ENOMEM; @@ -613,20 +792,32 @@ out: return 0; } -/** - * padata_remove_cpu - remove a cpu from the padata cpumask + /** + * padata_remove_cpu - remove a cpu from the one or both(serial and paralell) + * padata cpumasks. 
* * @pinst: padata instance * @cpu: cpu to remove + * @mask: bitmask specifying from which cpumask @cpu should be removed + * The @mask may be any combination of the following flags: + * PADATA_CPU_SERIAL - serial cpumask + * PADATA_CPU_PARALLEL - parallel cpumask */ -int padata_remove_cpu(struct padata_instance *pinst, int cpu) +int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask) { int err; + if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) + return -EINVAL; + mutex_lock(&pinst->lock); get_online_cpus(); - cpumask_clear_cpu(cpu, pinst->cpumask); + if (mask & PADATA_CPU_SERIAL) + cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu); + if (mask & PADATA_CPU_PARALLEL) + cpumask_clear_cpu(cpu, pinst->cpumask.pcpu); + err = __padata_remove_cpu(pinst, cpu); put_online_cpus(); @@ -672,6 +863,14 @@ void padata_stop(struct padata_instance *pinst) EXPORT_SYMBOL(padata_stop); #ifdef CONFIG_HOTPLUG_CPU + +static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu) +{ + return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) || + cpumask_test_cpu(cpu, pinst->cpumask.cbcpu); +} + + static int padata_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -684,7 +883,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); err = __padata_add_cpu(pinst, cpu); @@ -695,7 +894,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); err = __padata_remove_cpu(pinst, cpu); @@ -706,7 +905,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); __padata_remove_cpu(pinst, cpu); @@ -714,7 +913,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); __padata_add_cpu(pinst, cpu); @@ -726,13 +925,29 @@ static int padata_cpu_callback(struct notifier_block *nfb, #endif /** - * padata_alloc - allocate and initialize a padata instance + * padata_alloc - Allocate and initialize padata instance. + * Use default cpumask(cpu_possible_mask) + * for serial and parallel workes. + * + * @wq: workqueue to use for the allocated padata instance + */ +struct padata_instance *padata_alloc(struct workqueue_struct *wq) +{ + return __padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); +} +EXPORT_SYMBOL(padata_alloc); + +/** + * __padata_alloc - allocate and initialize a padata instance + * and specify cpumasks for serial and parallel workers. 
* - * @cpumask: cpumask that padata uses for parallelization * @wq: workqueue to use for the allocated padata instance + * @pcpumask: cpumask that will be used for padata parallelization + * @cbcpumask: cpumask that will be used for padata serialization */ -struct padata_instance *padata_alloc(const struct cpumask *cpumask, - struct workqueue_struct *wq) +struct padata_instance *__padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) { struct padata_instance *pinst; struct parallel_data *pd = NULL; @@ -742,21 +957,26 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, goto err; get_online_cpus(); - - if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL)) + if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL)) + goto err_free_inst; + if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) { + free_cpumask_var(pinst->cpumask.pcpu); goto err_free_inst; - - if (padata_validate_cpumask(pinst, cpumask)) { - pd = padata_alloc_pd(pinst, cpumask); - if (!pd) - goto err_free_mask; } + if (!padata_validate_cpumask(pinst, pcpumask) || + !padata_validate_cpumask(pinst, cbcpumask)) + goto err_free_masks; + + pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); + if (!pd) + goto err_free_masks; rcu_assign_pointer(pinst->pd, pd); pinst->wq = wq; - cpumask_copy(pinst->cpumask, cpumask); + cpumask_copy(pinst->cpumask.pcpu, pcpumask); + cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); pinst->flags = 0; @@ -768,19 +988,21 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, put_online_cpus(); + BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); mutex_init(&pinst->lock); return pinst; -err_free_mask: - free_cpumask_var(pinst->cpumask); +err_free_masks: + free_cpumask_var(pinst->cpumask.pcpu); + free_cpumask_var(pinst->cpumask.cbcpu); err_free_inst: kfree(pinst); put_online_cpus(); err: return NULL; } -EXPORT_SYMBOL(padata_alloc); +EXPORT_SYMBOL(__padata_alloc); /** * padata_free - free a padata instance @@ -795,7 +1017,8 @@ void padata_free(struct padata_instance *pinst) padata_stop(pinst); padata_free_pd(pinst->pd); - free_cpumask_var(pinst->cpumask); + free_cpumask_var(pinst->cpumask.pcpu); + free_cpumask_var(pinst->cpumask.cbcpu); kfree(pinst); } EXPORT_SYMBOL(padata_free); -- cgit v1.1 From 5e017dc3f8bc9e4a28983666e6bc00114a2018bb Mon Sep 17 00:00:00 2001 From: Dan Kruchinin Date: Wed, 14 Jul 2010 14:33:08 +0400 Subject: padata: Added sysfs primitives to padata subsystem Added sysfs primitives to padata subsystem. Now API user may embedded kobject each padata instance contains into any sysfs hierarchy. 
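As an illustration only (not part of the patch), a padata user could attach that embedded kobject under an existing sysfs directory roughly as below; "parent_kobj" and the "my_padata" name are hypothetical.

	/*
	 * Sketch: expose the instance's embedded kobject in sysfs.  This
	 * creates <parent>/my_padata/ containing the cpumask attributes
	 * described next.
	 */
	struct padata_instance *pinst = padata_alloc(wq);

	if (pinst)
		kobject_add(&pinst->kobj, parent_kobj, "my_padata");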
For now padata sysfs interface provides only two objects: serial_cpumask [RW] - cpumask for serial workers parallel_cpumask [RW] - cpumask for parallel workers Signed-off-by: Dan Kruchinin Signed-off-by: Herbert Xu --- kernel/padata.c | 155 ++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 146 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 84d0ca9..526f9ea 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #define MAX_SEQ_NR (INT_MAX - NR_CPUS) @@ -924,6 +925,149 @@ static int padata_cpu_callback(struct notifier_block *nfb, } #endif +static void __padata_free(struct padata_instance *pinst) +{ +#ifdef CONFIG_HOTPLUG_CPU + unregister_hotcpu_notifier(&pinst->cpu_notifier); +#endif + + padata_stop(pinst); + padata_free_pd(pinst->pd); + free_cpumask_var(pinst->cpumask.pcpu); + free_cpumask_var(pinst->cpumask.cbcpu); + kfree(pinst); +} + +#define kobj2pinst(_kobj) \ + container_of(_kobj, struct padata_instance, kobj) +#define attr2pentry(_attr) \ + container_of(_attr, struct padata_sysfs_entry, attr) + +static void padata_sysfs_release(struct kobject *kobj) +{ + struct padata_instance *pinst = kobj2pinst(kobj); + __padata_free(pinst); +} + +struct padata_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct padata_instance *, struct attribute *, char *); + ssize_t (*store)(struct padata_instance *, struct attribute *, + const char *, size_t); +}; + +static ssize_t show_cpumask(struct padata_instance *pinst, + struct attribute *attr, char *buf) +{ + struct cpumask *cpumask; + ssize_t len; + + mutex_lock(&pinst->lock); + if (!strcmp(attr->name, "serial_cpumask")) + cpumask = pinst->cpumask.cbcpu; + else + cpumask = pinst->cpumask.pcpu; + + len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask), + nr_cpu_ids); + if (PAGE_SIZE - len < 2) + len = -EINVAL; + else + len += sprintf(buf + len, "\n"); + + mutex_unlock(&pinst->lock); + return len; +} + +static ssize_t store_cpumask(struct padata_instance *pinst, + struct attribute *attr, + const char *buf, size_t count) +{ + cpumask_var_t new_cpumask; + ssize_t ret; + int mask_type; + + if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL)) + return -ENOMEM; + + ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask), + nr_cpumask_bits); + if (ret < 0) + goto out; + + mask_type = !strcmp(attr->name, "serial_cpumask") ? 
+ PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL; + ret = padata_set_cpumask(pinst, mask_type, new_cpumask); + if (!ret) + ret = count; + +out: + free_cpumask_var(new_cpumask); + return ret; +} + +#define PADATA_ATTR_RW(_name, _show_name, _store_name) \ + static struct padata_sysfs_entry _name##_attr = \ + __ATTR(_name, 0644, _show_name, _store_name) +#define PADATA_ATTR_RO(_name, _show_name) \ + static struct padata_sysfs_entry _name##_attr = \ + __ATTR(_name, 0400, _show_name, NULL) + +PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask); +PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask); + +/* + * Padata sysfs provides the following objects: + * serial_cpumask [RW] - cpumask for serial workers + * parallel_cpumask [RW] - cpumask for parallel workers + */ +static struct attribute *padata_default_attrs[] = { + &serial_cpumask_attr.attr, + ¶llel_cpumask_attr.attr, + NULL, +}; + +static ssize_t padata_sysfs_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct padata_instance *pinst; + struct padata_sysfs_entry *pentry; + ssize_t ret = -EIO; + + pinst = kobj2pinst(kobj); + pentry = attr2pentry(attr); + if (pentry->show) + ret = pentry->show(pinst, attr, buf); + + return ret; +} + +static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct padata_instance *pinst; + struct padata_sysfs_entry *pentry; + ssize_t ret = -EIO; + + pinst = kobj2pinst(kobj); + pentry = attr2pentry(attr); + if (pentry->show) + ret = pentry->store(pinst, attr, buf, count); + + return ret; +} + +static const struct sysfs_ops padata_sysfs_ops = { + .show = padata_sysfs_show, + .store = padata_sysfs_store, +}; + +static struct kobj_type padata_attr_type = { + .sysfs_ops = &padata_sysfs_ops, + .default_attrs = padata_default_attrs, + .release = padata_sysfs_release, +}; + /** * padata_alloc - Allocate and initialize padata instance. * Use default cpumask(cpu_possible_mask) @@ -989,6 +1133,7 @@ struct padata_instance *__padata_alloc(struct workqueue_struct *wq, put_online_cpus(); BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); + kobject_init(&pinst->kobj, &padata_attr_type); mutex_init(&pinst->lock); return pinst; @@ -1011,14 +1156,6 @@ EXPORT_SYMBOL(__padata_alloc); */ void padata_free(struct padata_instance *pinst) { -#ifdef CONFIG_HOTPLUG_CPU - unregister_hotcpu_notifier(&pinst->cpu_notifier); -#endif - - padata_stop(pinst); - padata_free_pd(pinst->pd); - free_cpumask_var(pinst->cpumask.pcpu); - free_cpumask_var(pinst->cpumask.cbcpu); - kfree(pinst); + kobject_put(&pinst->kobj); } EXPORT_SYMBOL(padata_free); -- cgit v1.1 From a2531293dbb7608fa672ff28efe3ab4027917a2f Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Sun, 18 Jul 2010 14:27:13 +0200 Subject: update email address pavel@suse.cz no longer works, replace it with working address. Signed-off-by: Pavel Machek Signed-off-by: Jiri Kosina --- kernel/debug/debug_core.c | 2 +- kernel/debug/gdbstub.c | 2 +- kernel/power/hibernate.c | 2 +- kernel/power/snapshot.c | 2 +- kernel/power/swap.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 5cb7cd1..568efbc 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -6,7 +6,7 @@ * Copyright (C) 2000-2001 VERITAS Software Corporation. * Copyright (C) 2002-2004 Timesys Corporation * Copyright (C) 2003-2004 Amit S. 
Kale - * Copyright (C) 2004 Pavel Machek + * Copyright (C) 2004 Pavel Machek * Copyright (C) 2004-2006 Tom Rini * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd. * Copyright (C) 2005-2009 Wind River Systems, Inc. diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index 4b17b32..4e58472 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -6,7 +6,7 @@ * Copyright (C) 2000-2001 VERITAS Software Corporation. * Copyright (C) 2002-2004 Timesys Corporation * Copyright (C) 2003-2004 Amit S. Kale - * Copyright (C) 2004 Pavel Machek + * Copyright (C) 2004 Pavel Machek * Copyright (C) 2004-2006 Tom Rini * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd. * Copyright (C) 2005-2009 Wind River Systems, Inc. diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index aa9e916..6b202e7 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -3,7 +3,7 @@ * * Copyright (c) 2003 Patrick Mochel * Copyright (c) 2003 Open Source Development Lab - * Copyright (c) 2004 Pavel Machek + * Copyright (c) 2004 Pavel Machek * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc. * * This file is released under the GPLv2. diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 25ce010..f6cd6fa 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -3,7 +3,7 @@ * * This file provides system snapshot/restore functionality for swsusp. * - * Copyright (C) 1998-2005 Pavel Machek + * Copyright (C) 1998-2005 Pavel Machek * Copyright (C) 2006 Rafael J. Wysocki * * This file is released under the GPLv2. diff --git a/kernel/power/swap.c b/kernel/power/swap.c index b0bb217..48a0aa9 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -4,7 +4,7 @@ * This file provides functions for reading the suspend image from * and writing it to a swap partition. * - * Copyright (C) 1998,2001-2005 Pavel Machek + * Copyright (C) 1998,2001-2005 Pavel Machek * Copyright (C) 2006 Rafael J. Wysocki * * This file is released under the GPLv2. -- cgit v1.1 From 931ac77ef65d2d90ee1def63d2041402ec7c53ab Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 11:07:48 +0200 Subject: workqueue: fix build problem on !CONFIG_SMP Commit f3421797 (workqueue: implement unbound workqueue) incorrectly tested CONFIG_SMP as part of a C expression in alloc/free_cwqs(). As CONFIG_SMP is not defined in UP, this breaks build. Fix it by using Found during linux-next build test. 
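As an aside (not part of the patch), the failing pattern and its replacement boil down to the following sketch; the real hunks appear in the diff below.

	/*
	 * Broken on UP: CONFIG_SMP is only defined for SMP builds, so using
	 * it as an identifier in a C expression fails to compile there.
	 */
	if (CONFIG_SMP && !(wq->flags & WQ_UNBOUND))
		wq->cpu_wq.pcpu = __alloc_percpu(size, align);

	/* Fixed: let the preprocessor pick the condition instead. */
#ifdef CONFIG_SMP
	bool percpu = !(wq->flags & WQ_UNBOUND);
#else
	bool percpu = false;
#endif

	if (percpu)
		wq->cpu_wq.pcpu = __alloc_percpu(size, align);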
Signed-off-by: Tejun Heo Reported-by: Stephen Rothwell --- kernel/workqueue.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index aca9472..79a11e4 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2615,11 +2615,15 @@ static int alloc_cwqs(struct workqueue_struct *wq) const size_t size = sizeof(struct cpu_workqueue_struct); const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, __alignof__(unsigned long long)); +#ifdef CONFIG_SMP + bool percpu = !(wq->flags & WQ_UNBOUND); +#else + bool percpu = false; +#endif - if (CONFIG_SMP && !(wq->flags & WQ_UNBOUND)) { - /* on SMP, percpu allocator can align itself */ + if (percpu) wq->cpu_wq.pcpu = __alloc_percpu(size, align); - } else { + else { void *ptr; /* @@ -2641,7 +2645,13 @@ static int alloc_cwqs(struct workqueue_struct *wq) static void free_cwqs(struct workqueue_struct *wq) { - if (CONFIG_SMP && !(wq->flags & WQ_UNBOUND)) +#ifdef CONFIG_SMP + bool percpu = !(wq->flags & WQ_UNBOUND); +#else + bool percpu = false; +#endif + + if (percpu) free_percpu(wq->cpu_wq.pcpu); else if (wq->cpu_wq.single) { /* the pointer to free is stored right after the cwq */ -- cgit v1.1 From f376bf5ffbad863d4bc3b2586b7e34cdf756ad17 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 16 Jul 2010 00:26:26 +0200 Subject: tracing: Remove sysprof ftrace plugin The sysprof ftrace plugin doesn't seem to be seriously used somewhere. There is a branch in the sysprof tree that makes an interface to it, but the real sysprof tool uses either its own module or perf events. Drop the sysprof ftrace plugin then, as it's mostly useless. Signed-off-by: Frederic Weisbecker Acked-by: Soeren Sandmann Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Steven Rostedt Cc: Li Zefan --- kernel/trace/Kconfig | 9 -- kernel/trace/Makefile | 1 - kernel/trace/trace.c | 3 - kernel/trace/trace.h | 3 - kernel/trace/trace_selftest.c | 32 ---- kernel/trace/trace_sysprof.c | 330 ------------------------------------------ 6 files changed, 378 deletions(-) delete mode 100644 kernel/trace/trace_sysprof.c (limited to 'kernel') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index f5306cb..c7683fd 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -194,15 +194,6 @@ config PREEMPT_TRACER enabled. This option and the irqs-off timing option can be used together or separately.) -config SYSPROF_TRACER - bool "Sysprof Tracer" - depends on X86 - select GENERIC_TRACER - select CONTEXT_SWITCH_TRACER - help - This tracer provides the trace needed by the 'Sysprof' userspace - tool. 
- config SCHED_TRACER bool "Scheduling Latency Tracer" select GENERIC_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 84b2c99..438e84a 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -30,7 +30,6 @@ obj-$(CONFIG_TRACING) += trace_output.o obj-$(CONFIG_TRACING) += trace_stat.o obj-$(CONFIG_TRACING) += trace_printk.o obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o -obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8683dec..78a49e6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4354,9 +4354,6 @@ static __init int tracer_init_debugfs(void) trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, &ftrace_update_tot_cnt, &tracing_dyn_info_fops); #endif -#ifdef CONFIG_SYSPROF_TRACER - init_tracer_sysprof_debugfs(d_tracer); -#endif create_trace_options_dir(); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 84d3f12..2114b4c1 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -296,7 +296,6 @@ struct dentry *trace_create_file(const char *name, const struct file_operations *fops); struct dentry *tracing_init_dentry(void); -void init_tracer_sysprof_debugfs(struct dentry *d_tracer); struct ring_buffer_event; @@ -428,8 +427,6 @@ extern int trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr); extern int trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr); -extern int trace_selftest_startup_sysprof(struct tracer *trace, - struct trace_array *tr); extern int trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr); #endif /* CONFIG_FTRACE_STARTUP_TEST */ diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 39a5ca4..6ed05ee 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -690,38 +690,6 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr } #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ -#ifdef CONFIG_SYSPROF_TRACER -int -trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr) -{ - unsigned long count; - int ret; - - /* start the tracing */ - ret = tracer_init(trace, tr); - if (ret) { - warn_failed_init_tracer(trace, ret); - return ret; - } - - /* Sleep for a 1/10 of a second */ - msleep(100); - /* stop the tracing. */ - tracing_stop(); - /* check the trace buffer */ - ret = trace_test_buffer(tr, &count); - trace->reset(tr); - tracing_start(); - - if (!ret && !count) { - printk(KERN_CONT ".. 
no entries found .."); - ret = -1; - } - - return ret; -} -#endif /* CONFIG_SYSPROF_TRACER */ - #ifdef CONFIG_BRANCH_TRACER int trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c deleted file mode 100644 index c080956..0000000 --- a/kernel/trace/trace_sysprof.c +++ /dev/null @@ -1,330 +0,0 @@ -/* - * trace stack traces - * - * Copyright (C) 2004-2008, Soeren Sandmann - * Copyright (C) 2007 Steven Rostedt - * Copyright (C) 2008 Ingo Molnar - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "trace.h" - -static struct trace_array *sysprof_trace; -static int __read_mostly tracer_enabled; - -/* - * 1 msec sample interval by default: - */ -static unsigned long sample_period = 1000000; -static const unsigned int sample_max_depth = 512; - -static DEFINE_MUTEX(sample_timer_lock); -/* - * Per CPU hrtimers that do the profiling: - */ -static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer); - -struct stack_frame_user { - const void __user *next_fp; - unsigned long return_address; -}; - -static int -copy_stack_frame(const void __user *fp, struct stack_frame_user *frame) -{ - int ret; - - if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) - return 0; - - ret = 1; - pagefault_disable(); - if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) - ret = 0; - pagefault_enable(); - - return ret; -} - -struct backtrace_info { - struct trace_array_cpu *data; - struct trace_array *tr; - int pos; -}; - -static void -backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) -{ - /* Ignore warnings */ -} - -static void backtrace_warning(void *data, char *msg) -{ - /* Ignore warnings */ -} - -static int backtrace_stack(void *data, char *name) -{ - /* Don't bother with IRQ stacks for now */ - return -1; -} - -static void backtrace_address(void *data, unsigned long addr, int reliable) -{ - struct backtrace_info *info = data; - - if (info->pos < sample_max_depth && reliable) { - __trace_special(info->tr, info->data, 1, addr, 0); - - info->pos++; - } -} - -static const struct stacktrace_ops backtrace_ops = { - .warning = backtrace_warning, - .warning_symbol = backtrace_warning_symbol, - .stack = backtrace_stack, - .address = backtrace_address, - .walk_stack = print_context_stack, -}; - -static int -trace_kernel(struct pt_regs *regs, struct trace_array *tr, - struct trace_array_cpu *data) -{ - struct backtrace_info info; - unsigned long bp; - char *stack; - - info.tr = tr; - info.data = data; - info.pos = 1; - - __trace_special(info.tr, info.data, 1, regs->ip, 0); - - stack = ((char *)regs + sizeof(struct pt_regs)); -#ifdef CONFIG_FRAME_POINTER - bp = regs->bp; -#else - bp = 0; -#endif - - dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info); - - return info.pos; -} - -static void timer_notify(struct pt_regs *regs, int cpu) -{ - struct trace_array_cpu *data; - struct stack_frame_user frame; - struct trace_array *tr; - const void __user *fp; - int is_user; - int i; - - if (!regs) - return; - - tr = sysprof_trace; - data = tr->data[cpu]; - is_user = user_mode(regs); - - if (!current || current->pid == 0) - return; - - if (is_user && current->state != TASK_RUNNING) - return; - - __trace_special(tr, data, 0, 0, current->pid); - - if (!is_user) - i = trace_kernel(regs, tr, data); - else - i = 0; - - /* - * Trace user stack if we are not a kernel thread - */ - if (current->mm && i < sample_max_depth) { - regs = (struct pt_regs 
*)current->thread.sp0 - 1; - - fp = (void __user *)regs->bp; - - __trace_special(tr, data, 2, regs->ip, 0); - - while (i < sample_max_depth) { - frame.next_fp = NULL; - frame.return_address = 0; - if (!copy_stack_frame(fp, &frame)) - break; - if ((unsigned long)fp < regs->sp) - break; - - __trace_special(tr, data, 2, frame.return_address, - (unsigned long)fp); - fp = frame.next_fp; - - i++; - } - - } - - /* - * Special trace entry if we overflow the max depth: - */ - if (i == sample_max_depth) - __trace_special(tr, data, -1, -1, -1); - - __trace_special(tr, data, 3, current->pid, i); -} - -static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer) -{ - /* trace here */ - timer_notify(get_irq_regs(), smp_processor_id()); - - hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period)); - - return HRTIMER_RESTART; -} - -static void start_stack_timer(void *unused) -{ - struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer); - - hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hrtimer->function = stack_trace_timer_fn; - - hrtimer_start(hrtimer, ns_to_ktime(sample_period), - HRTIMER_MODE_REL_PINNED); -} - -static void start_stack_timers(void) -{ - on_each_cpu(start_stack_timer, NULL, 1); -} - -static void stop_stack_timer(int cpu) -{ - struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); - - hrtimer_cancel(hrtimer); -} - -static void stop_stack_timers(void) -{ - int cpu; - - for_each_online_cpu(cpu) - stop_stack_timer(cpu); -} - -static void stop_stack_trace(struct trace_array *tr) -{ - mutex_lock(&sample_timer_lock); - stop_stack_timers(); - tracer_enabled = 0; - mutex_unlock(&sample_timer_lock); -} - -static int stack_trace_init(struct trace_array *tr) -{ - sysprof_trace = tr; - - tracing_start_cmdline_record(); - - mutex_lock(&sample_timer_lock); - start_stack_timers(); - tracer_enabled = 1; - mutex_unlock(&sample_timer_lock); - return 0; -} - -static void stack_trace_reset(struct trace_array *tr) -{ - tracing_stop_cmdline_record(); - stop_stack_trace(tr); -} - -static struct tracer stack_trace __read_mostly = -{ - .name = "sysprof", - .init = stack_trace_init, - .reset = stack_trace_reset, -#ifdef CONFIG_FTRACE_SELFTEST - .selftest = trace_selftest_startup_sysprof, -#endif -}; - -__init static int init_stack_trace(void) -{ - return register_tracer(&stack_trace); -} -device_initcall(init_stack_trace); - -#define MAX_LONG_DIGITS 22 - -static ssize_t -sysprof_sample_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char buf[MAX_LONG_DIGITS]; - int r; - - r = sprintf(buf, "%ld\n", nsecs_to_usecs(sample_period)); - - return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -} - -static ssize_t -sysprof_sample_write(struct file *filp, const char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char buf[MAX_LONG_DIGITS]; - unsigned long val; - - if (cnt > MAX_LONG_DIGITS-1) - cnt = MAX_LONG_DIGITS-1; - - if (copy_from_user(&buf, ubuf, cnt)) - return -EFAULT; - - buf[cnt] = 0; - - val = simple_strtoul(buf, NULL, 10); - /* - * Enforce a minimum sample period of 100 usecs: - */ - if (val < 100) - val = 100; - - mutex_lock(&sample_timer_lock); - stop_stack_timers(); - sample_period = val * 1000; - start_stack_timers(); - mutex_unlock(&sample_timer_lock); - - return cnt; -} - -static const struct file_operations sysprof_sample_fops = { - .read = sysprof_sample_read, - .write = sysprof_sample_write, -}; - -void init_tracer_sysprof_debugfs(struct dentry *d_tracer) -{ - - trace_create_file("sysprof_sample_period", 0644, - d_tracer, NULL, 
&sysprof_sample_fops); -} -- cgit v1.1 From eb7beb5c09af75494234ea6acd09d0a647cf7338 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 16 Jul 2010 00:50:03 +0200 Subject: tracing: Remove special traces Special traces type was only used by sysprof. Lets remove it now that sysprof ftrace plugin has been dropped. Signed-off-by: Frederic Weisbecker Acked-by: Soeren Sandmann Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Steven Rostedt Cc: Li Zefan --- kernel/trace/trace.c | 55 ------------------------------------ kernel/trace/trace.h | 7 ----- kernel/trace/trace_entries.h | 17 ----------- kernel/trace/trace_output.c | 66 ------------------------------------------- kernel/trace/trace_selftest.c | 1 - 5 files changed, 146 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 78a49e6..d9a4aa0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1331,61 +1331,6 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags) #endif /* CONFIG_STACKTRACE */ -static void -ftrace_trace_special(void *__tr, - unsigned long arg1, unsigned long arg2, unsigned long arg3, - int pc) -{ - struct ftrace_event_call *call = &event_special; - struct ring_buffer_event *event; - struct trace_array *tr = __tr; - struct ring_buffer *buffer = tr->buffer; - struct special_entry *entry; - - event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL, - sizeof(*entry), 0, pc); - if (!event) - return; - entry = ring_buffer_event_data(event); - entry->arg1 = arg1; - entry->arg2 = arg2; - entry->arg3 = arg3; - - if (!filter_check_discard(call, entry, buffer, event)) - trace_buffer_unlock_commit(buffer, event, 0, pc); -} - -void -__trace_special(void *__tr, void *__data, - unsigned long arg1, unsigned long arg2, unsigned long arg3) -{ - ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count()); -} - -void -ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) -{ - struct trace_array *tr = &global_trace; - struct trace_array_cpu *data; - unsigned long flags; - int cpu; - int pc; - - if (tracing_disabled) - return; - - pc = preempt_count(); - local_irq_save(flags); - cpu = raw_smp_processor_id(); - data = tr->data[cpu]; - - if (likely(atomic_inc_return(&data->disabled) == 1)) - ftrace_trace_special(tr, arg1, arg2, arg3, pc); - - atomic_dec(&data->disabled); - local_irq_restore(flags); -} - /** * trace_vbprintk - write binary msg to tracing buffer * diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 2114b4c1..638a588 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -22,7 +22,6 @@ enum trace_type { TRACE_STACK, TRACE_PRINT, TRACE_BPRINT, - TRACE_SPECIAL, TRACE_MMIO_RW, TRACE_MMIO_MAP, TRACE_BRANCH, @@ -189,7 +188,6 @@ extern void __ftrace_bad_type(void); IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ - IF_ASSIGN(var, ent, struct special_entry, 0); \ IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ TRACE_MMIO_RW); \ IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ @@ -332,11 +330,6 @@ void tracing_sched_wakeup_trace(struct trace_array *tr, struct task_struct *wakee, struct task_struct *cur, unsigned long flags, int pc); -void trace_special(struct trace_array *tr, - struct trace_array_cpu *data, - unsigned long arg1, - unsigned long arg2, - unsigned long arg3, int pc); void trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, diff --git 
a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index 8412837..e3dfeca 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -151,23 +151,6 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry, ); /* - * Special (free-form) trace entry: - */ -FTRACE_ENTRY(special, special_entry, - - TRACE_SPECIAL, - - F_STRUCT( - __field( unsigned long, arg1 ) - __field( unsigned long, arg2 ) - __field( unsigned long, arg3 ) - ), - - F_printk("(%08lx) (%08lx) (%08lx)", - __entry->arg1, __entry->arg2, __entry->arg3) -); - -/* * Stack-trace entry: */ diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 57c1b45..a46197b 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -1069,65 +1069,6 @@ static struct trace_event trace_wake_event = { .funcs = &trace_wake_funcs, }; -/* TRACE_SPECIAL */ -static enum print_line_t trace_special_print(struct trace_iterator *iter, - int flags, struct trace_event *event) -{ - struct special_entry *field; - - trace_assign_type(field, iter->ent); - - if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n", - field->arg1, - field->arg2, - field->arg3)) - return TRACE_TYPE_PARTIAL_LINE; - - return TRACE_TYPE_HANDLED; -} - -static enum print_line_t trace_special_hex(struct trace_iterator *iter, - int flags, struct trace_event *event) -{ - struct special_entry *field; - struct trace_seq *s = &iter->seq; - - trace_assign_type(field, iter->ent); - - SEQ_PUT_HEX_FIELD_RET(s, field->arg1); - SEQ_PUT_HEX_FIELD_RET(s, field->arg2); - SEQ_PUT_HEX_FIELD_RET(s, field->arg3); - - return TRACE_TYPE_HANDLED; -} - -static enum print_line_t trace_special_bin(struct trace_iterator *iter, - int flags, struct trace_event *event) -{ - struct special_entry *field; - struct trace_seq *s = &iter->seq; - - trace_assign_type(field, iter->ent); - - SEQ_PUT_FIELD_RET(s, field->arg1); - SEQ_PUT_FIELD_RET(s, field->arg2); - SEQ_PUT_FIELD_RET(s, field->arg3); - - return TRACE_TYPE_HANDLED; -} - -static struct trace_event_functions trace_special_funcs = { - .trace = trace_special_print, - .raw = trace_special_print, - .hex = trace_special_hex, - .binary = trace_special_bin, -}; - -static struct trace_event trace_special_event = { - .type = TRACE_SPECIAL, - .funcs = &trace_special_funcs, -}; - /* TRACE_STACK */ static enum print_line_t trace_stack_print(struct trace_iterator *iter, @@ -1161,9 +1102,6 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, static struct trace_event_functions trace_stack_funcs = { .trace = trace_stack_print, - .raw = trace_special_print, - .hex = trace_special_hex, - .binary = trace_special_bin, }; static struct trace_event trace_stack_event = { @@ -1194,9 +1132,6 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, static struct trace_event_functions trace_user_stack_funcs = { .trace = trace_user_stack_print, - .raw = trace_special_print, - .hex = trace_special_hex, - .binary = trace_special_bin, }; static struct trace_event trace_user_stack_event = { @@ -1314,7 +1249,6 @@ static struct trace_event *events[] __initdata = { &trace_fn_event, &trace_ctx_event, &trace_wake_event, - &trace_special_event, &trace_stack_event, &trace_user_stack_event, &trace_bprint_event, diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 6ed05ee..155a415 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -13,7 +13,6 @@ static inline int trace_valid_entry(struct trace_entry *entry) case TRACE_WAKE: case TRACE_STACK: case 
TRACE_PRINT: - case TRACE_SPECIAL: case TRACE_BRANCH: case TRACE_GRAPH_ENT: case TRACE_GRAPH_RET: -- cgit v1.1 From b444786f1a797a7f84e2561346a670649f9c7b3c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 7 Jul 2010 23:40:11 +0200 Subject: tracing: Use generic_file_llseek for debugfs The default for llseek will change to no_llseek, so the tracing debugfs files need to add explicit .llseek assignments. Since we're dealing with regular files from a VFS perspective, use generic_file_llseek. Signed-off-by: Arnd Bergmann Cc: Steven Rostedt Cc: Ingo Molnar Cc: John Kacur Cc: Li Zefan LKML-Reference: <1278538820-1392-10-git-send-email-arnd@arndb.de> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d9a4aa0..c1752da 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2338,6 +2338,7 @@ static const struct file_operations show_traces_fops = { .open = show_traces_open, .read = seq_read, .release = seq_release, + .llseek = seq_lseek, }; /* @@ -2431,6 +2432,7 @@ static const struct file_operations tracing_cpumask_fops = { .open = tracing_open_generic, .read = tracing_cpumask_read, .write = tracing_cpumask_write, + .llseek = generic_file_llseek, }; static int tracing_trace_options_show(struct seq_file *m, void *v) @@ -2597,6 +2599,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf, static const struct file_operations tracing_readme_fops = { .open = tracing_open_generic, .read = tracing_readme_read, + .llseek = generic_file_llseek, }; static ssize_t @@ -2647,6 +2650,7 @@ tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, static const struct file_operations tracing_saved_cmdlines_fops = { .open = tracing_open_generic, .read = tracing_saved_cmdlines_read, + .llseek = generic_file_llseek, }; static ssize_t @@ -2976,6 +2980,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) if (iter->trace->pipe_open) iter->trace->pipe_open(iter); + nonseekable_open(inode, filp); out: mutex_unlock(&trace_types_lock); return ret; @@ -3534,18 +3539,21 @@ static const struct file_operations tracing_max_lat_fops = { .open = tracing_open_generic, .read = tracing_max_lat_read, .write = tracing_max_lat_write, + .llseek = generic_file_llseek, }; static const struct file_operations tracing_ctrl_fops = { .open = tracing_open_generic, .read = tracing_ctrl_read, .write = tracing_ctrl_write, + .llseek = generic_file_llseek, }; static const struct file_operations set_tracer_fops = { .open = tracing_open_generic, .read = tracing_set_trace_read, .write = tracing_set_trace_write, + .llseek = generic_file_llseek, }; static const struct file_operations tracing_pipe_fops = { @@ -3554,17 +3562,20 @@ static const struct file_operations tracing_pipe_fops = { .read = tracing_read_pipe, .splice_read = tracing_splice_read_pipe, .release = tracing_release_pipe, + .llseek = no_llseek, }; static const struct file_operations tracing_entries_fops = { .open = tracing_open_generic, .read = tracing_entries_read, .write = tracing_entries_write, + .llseek = generic_file_llseek, }; static const struct file_operations tracing_mark_fops = { .open = tracing_open_generic, .write = tracing_mark_write, + .llseek = generic_file_llseek, }; static const struct file_operations trace_clock_fops = { @@ -3870,6 +3881,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, static const struct file_operations tracing_stats_fops = { .open = 
tracing_open_generic, .read = tracing_stats_read, + .llseek = generic_file_llseek, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -3906,6 +3918,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf, static const struct file_operations tracing_dyn_info_fops = { .open = tracing_open_generic, .read = tracing_read_dyn_info, + .llseek = generic_file_llseek, }; #endif @@ -4059,6 +4072,7 @@ static const struct file_operations trace_options_fops = { .open = tracing_open_generic, .read = trace_options_read, .write = trace_options_write, + .llseek = generic_file_llseek, }; static ssize_t @@ -4110,6 +4124,7 @@ static const struct file_operations trace_options_core_fops = { .open = tracing_open_generic, .read = trace_options_core_read, .write = trace_options_core_write, + .llseek = generic_file_llseek, }; struct dentry *trace_create_file(const char *name, -- cgit v1.1 From f2e005aaff4878a8ea93d5fb033a21389b72579a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 15:59:09 +0200 Subject: workqueue: fix mayday_mask handling on UP All cpumasks are assumed to have cpu 0 permanently set on UP, so it can't be used to signify whether there's something to be done for the CPU. workqueue was using cpumask to track which CPU requested rescuer assistance and this led rescuer thread to think there always are pending mayday requests on UP, which resulted in infinite busy loops. This patch fixes the problem by introducing mayday_mask_t and associated helpers which wrap cpumask on SMP and emulates its behavior using bitops and unsigned long on UP. Signed-off-by: Tejun Heo Cc: Rusty Russell --- kernel/workqueue.c | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 79a11e4..c11edc9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -186,6 +186,27 @@ struct wq_flusher { }; /* + * All cpumasks are assumed to be always set on UP and thus can't be + * used to determine whether there's something to be done. 
+ */ +#ifdef CONFIG_SMP +typedef cpumask_var_t mayday_mask_t; +#define mayday_test_and_set_cpu(cpu, mask) \ + cpumask_test_and_set_cpu((cpu), (mask)) +#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) +#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) +#define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp)) +#define free_mayday_mask(mask) free_cpumask_var((mask)) +#else +typedef unsigned long mayday_mask_t; +#define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask)) +#define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask)) +#define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask)) +#define alloc_mayday_mask(maskp, gfp) true +#define free_mayday_mask(mask) do { } while (0) +#endif + +/* * The externally visible workqueue abstraction is an array of * per-CPU workqueues: */ @@ -206,7 +227,7 @@ struct workqueue_struct { struct list_head flusher_queue; /* F: flush waiters */ struct list_head flusher_overflow; /* F: flush overflow list */ - cpumask_var_t mayday_mask; /* cpus requesting rescue */ + mayday_mask_t mayday_mask; /* cpus requesting rescue */ struct worker *rescuer; /* I: rescue worker */ int saved_max_active; /* W: saved cwq max_active */ @@ -1387,7 +1408,7 @@ static bool send_mayday(struct work_struct *work) /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ if (cpu == WORK_CPU_UNBOUND) cpu = 0; - if (!cpumask_test_and_set_cpu(cpu, wq->mayday_mask)) + if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask)) wake_up_process(wq->rescuer->task); return true; } @@ -1915,14 +1936,14 @@ repeat: * See whether any cpu is asking for help. Unbounded * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. */ - for_each_cpu(cpu, wq->mayday_mask) { + for_each_mayday_cpu(cpu, wq->mayday_mask) { unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); struct global_cwq *gcwq = cwq->gcwq; struct work_struct *work, *n; __set_current_state(TASK_RUNNING); - cpumask_clear_cpu(cpu, wq->mayday_mask); + mayday_clear_cpu(cpu, wq->mayday_mask); /* migrate to the target cpu if possible */ rescuer->gcwq = gcwq; @@ -2724,7 +2745,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, if (flags & WQ_RESCUER) { struct worker *rescuer; - if (!alloc_cpumask_var(&wq->mayday_mask, GFP_KERNEL)) + if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL)) goto err; wq->rescuer = rescuer = alloc_worker(); @@ -2759,7 +2780,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, err: if (wq) { free_cwqs(wq); - free_cpumask_var(wq->mayday_mask); + free_mayday_mask(wq->mayday_mask); kfree(wq->rescuer); kfree(wq); } @@ -2800,7 +2821,7 @@ void destroy_workqueue(struct workqueue_struct *wq) if (wq->flags & WQ_RESCUER) { kthread_stop(wq->rescuer->task); - free_cpumask_var(wq->mayday_mask); + free_mayday_mask(wq->mayday_mask); } free_cwqs(wq); -- cgit v1.1 From 70d4bf6d467a330ccc947df9b2608e329d9e7708 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Tue, 20 Jul 2010 06:45:56 +0000 Subject: drop_monitor: convert some kfree_skb call sites to consume_skb Convert a few calls from kfree_skb to consume_skb Noticed while I was working on dropwatch that I was detecting lots of internal skb drops in several places. While some are legitimate, several were not, freeing skbs that were at the end of their life, rather than being discarded due to an error. 
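For context, the convention the conversion relies on looks like this (an editorial sketch, not code from the patch; the helper name is hypothetical).

	#include <linux/skbuff.h>

	/* Only genuine errors should be visible to drop_monitor as drops. */
	static void example_skb_done(struct sk_buff *skb, int err)
	{
		if (err)
			kfree_skb(skb);		/* error path: reported as a drop */
		else
			consume_skb(skb);	/* normal end of life: stays quiet */
	}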
This patch converts those calls sites from using kfree_skb to consume_skb, which quiets the in-kernel drop_monitor code from detecting them as drops. Tested successfully by myself Signed-off-by: Neil Horman Signed-off-by: David S. Miller --- kernel/audit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index c71bd26..8296aa5 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -407,7 +407,7 @@ static void kauditd_send_skb(struct sk_buff *skb) audit_hold_skb(skb); } else /* drop the extra reference if sent ok */ - kfree_skb(skb); + consume_skb(skb); } static int kauditd_thread(void *dummy) -- cgit v1.1 From e870e9a1240bcef1157ffaaf71dac63362e71904 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 2 Jul 2010 11:07:32 +0800 Subject: tracing: Allow to disable cmdline recording We found that even enabling a single trace event that will rarely be triggered can add big overhead to context switch. (lmbench context switch test) ------------------------------------------------- 2p/0K 2p/16K 2p/64K 8p/16K 8p/64K 16p/16K 16p/64K ctxsw ctxsw ctxsw ctxsw ctxsw ctxsw ctxsw ------ ------ ------ ------ ------ ------- ------- 2.19 2.3 2.21 2.56 2.13 2.54 2.07 2.39 2.51 2.35 2.75 2.27 2.81 2.24 The overhead is 6% ~ 11%. It's because when a trace event is enabled 3 tracepoints (sched_switch, sched_wakeup, sched_wakeup_new) will be activated to map pid to cmdname. We'd like to avoid this overhead, so add a trace option '(no)record-cmd' to allow to disable cmdline recording. Signed-off-by: Li Zefan LKML-Reference: <4C2D57F4.2050204@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 6 +++++- kernel/trace/trace.h | 3 +++ kernel/trace/trace_events.c | 30 ++++++++++++++++++++++++++++-- 3 files changed, 36 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8683dec..af90429 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -344,7 +344,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); /* trace_flags holds trace_options default values */ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | - TRACE_ITER_GRAPH_TIME; + TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD; static int trace_stop_count; static DEFINE_SPINLOCK(tracing_start_lock); @@ -428,6 +428,7 @@ static const char *trace_options[] = { "latency-format", "sleep-time", "graph-time", + "record-cmd", NULL }; @@ -2561,6 +2562,9 @@ static void set_tracer_flags(unsigned int mask, int enabled) trace_flags |= mask; else trace_flags &= ~mask; + + if (mask == TRACE_ITER_RECORD_CMD) + trace_event_enable_cmd_record(enabled); } static ssize_t diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 84d3f12..7778f06 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -591,6 +591,7 @@ enum trace_iterator_flags { TRACE_ITER_LATENCY_FMT = 0x20000, TRACE_ITER_SLEEP_TIME = 0x40000, TRACE_ITER_GRAPH_TIME = 0x80000, + TRACE_ITER_RECORD_CMD = 0x100000, }; /* @@ -723,6 +724,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, return 0; } +extern void trace_event_enable_cmd_record(bool enable); + extern struct mutex event_mutex; extern struct list_head ftrace_events; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index e8e6043..09b4fa6 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -170,6 +170,26 @@ int ftrace_event_reg(struct 
ftrace_event_call *call, enum trace_reg type) } EXPORT_SYMBOL_GPL(ftrace_event_reg); +void trace_event_enable_cmd_record(bool enable) +{ + struct ftrace_event_call *call; + + mutex_lock(&event_mutex); + list_for_each_entry(call, &ftrace_events, list) { + if (!(call->flags & TRACE_EVENT_FL_ENABLED)) + continue; + + if (enable) { + tracing_start_cmdline_record(); + call->flags |= TRACE_EVENT_FL_RECORDED_CMD; + } else { + tracing_stop_cmdline_record(); + call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD; + } + } + mutex_unlock(&event_mutex); +} + static int ftrace_event_enable_disable(struct ftrace_event_call *call, int enable) { @@ -179,13 +199,19 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, case 0: if (call->flags & TRACE_EVENT_FL_ENABLED) { call->flags &= ~TRACE_EVENT_FL_ENABLED; - tracing_stop_cmdline_record(); + if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) { + tracing_stop_cmdline_record(); + call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD; + } call->class->reg(call, TRACE_REG_UNREGISTER); } break; case 1: if (!(call->flags & TRACE_EVENT_FL_ENABLED)) { - tracing_start_cmdline_record(); + if (trace_flags & TRACE_ITER_RECORD_CMD) { + tracing_start_cmdline_record(); + call->flags |= TRACE_EVENT_FL_RECORDED_CMD; + } ret = call->class->reg(call, TRACE_REG_REGISTER); if (ret) { tracing_stop_cmdline_record(); -- cgit v1.1 From 985023dee6e212493831431ba2e3ce8918f001b2 Mon Sep 17 00:00:00 2001 From: Richard Kennedy Date: Thu, 25 Mar 2010 11:27:36 +0000 Subject: trace: Reorder struct ring_buffer_per_cpu to remove padding on 64bit Reorder structure to remove 8 bytes of padding on 64 bit builds. This shrinks the size to 128 bytes so allowing allocation from a smaller slab & needed one fewer cache lines. Signed-off-by: Richard Kennedy LKML-Reference: <1269516456.2054.8.camel@localhost> Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 28d0615..3632ce8 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -443,6 +443,7 @@ int ring_buffer_print_page_header(struct trace_seq *s) */ struct ring_buffer_per_cpu { int cpu; + atomic_t record_disabled; struct ring_buffer *buffer; spinlock_t reader_lock; /* serialize readers */ arch_spinlock_t lock; @@ -462,7 +463,6 @@ struct ring_buffer_per_cpu { unsigned long read; u64 write_stamp; u64 read_stamp; - atomic_t record_disabled; }; struct ring_buffer { -- cgit v1.1 From bc289ae98b75d93228d24f521ef02a076e506e94 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 3 Jun 2010 18:26:24 +0800 Subject: tracing: Reduce latency and remove percpu trace_seq __print_flags() and __print_symbolic() use percpu trace_seq: 1) Its memory is allocated at compile time, it wastes memory if we don't use tracing. 2) It is percpu data and it wastes more memory for multi-cpus system. 3) It disables preemption when it executes its core routine "trace_seq_printf(s, "%s: ", #call);" and introduces latency. So we move this trace_seq to struct trace_iterator. 
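To make the trade-off concrete, here is a rough sketch of the two approaches, not the exact ftrace code: get_cpu_var() has to disable preemption to keep the per-cpu buffer stable, while a trace_seq embedded in the iterator needs no such protection. The tmp_seq field name and the example_* identifiers are assumptions for illustration.

#include <linux/percpu.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

/* old style: one static buffer per cpu, pinned while in use */
static DEFINE_PER_CPU(struct trace_seq, example_event_seq);

static void example_print_percpu(void)
{
	struct trace_seq *s = &get_cpu_var(example_event_seq);	/* preemption off */

	trace_seq_init(s);
	trace_seq_printf(s, "flags: ");
	/* ... format the event ... */
	put_cpu_var(example_event_seq);				/* preemption back on */
}

/* new style: the buffer travels with the (already per-reader) iterator */
static void example_print_iter(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->tmp_seq;	/* field name assumed */

	trace_seq_init(s);
	trace_seq_printf(s, "flags: ");
	/* ... format the event, no preemption games needed ... */
}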
Signed-off-by: Lai Jiangshan LKML-Reference: <4C078350.7090106@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace_output.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 57c1b45..1ba64d3 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -16,9 +16,6 @@ DECLARE_RWSEM(trace_event_mutex); -DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq); -EXPORT_PER_CPU_SYMBOL(ftrace_event_seq); - static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; static int next_event_type = __TRACE_LAST_TYPE + 1; -- cgit v1.1 From ef710e100c1068d3dd5774d2b34c5485219e06ce Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Thu, 1 Jul 2010 14:34:35 +0900 Subject: tracing: Shrink max latency ringbuffer if unnecessary Documentation/trace/ftrace.txt says buffer_size_kb: This sets or displays the number of kilobytes each CPU buffer can hold. The tracer buffers are the same size for each CPU. The displayed number is the size of the CPU buffer and not total size of all buffers. The trace buffers are allocated in pages (blocks of memory that the kernel uses for allocation, usually 4 KB in size). If the last page allocated has room for more bytes than requested, the rest of the page will be used, making the actual allocation bigger than requested. ( Note, the size may not be a multiple of the page size due to buffer management overhead. ) This can only be updated when the current_tracer is set to "nop". But it's incorrect. currently total memory consumption is 'buffer_size_kb x CPUs x 2'. Why two times difference is there? because ftrace implicitly allocate the buffer for max latency too. That makes sad result when admin want to use large buffer. (If admin want full logging and makes detail analysis). example, If admin have 24 CPUs machine and write 200MB to buffer_size_kb, the system consume ~10GB memory (200MB x 24 x 2). umm.. 5GB memory waste is usually unacceptable. Fortunatelly, almost all users don't use max latency feature. The max latency buffer can be disabled easily. This patch shrink buffer size of the max latency buffer if unnecessary. 
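In the 24-CPU example above, 200MB per CPU is paid twice, once for the live buffer and once for the max-latency copy, which is where the ~10GB figure comes from; after this change the copy stays at a token size until a latency tracer is selected. A simplified sketch of the gating, using the file-local symbols from kernel/trace/trace.c (global_trace, max_tr, current_trace); the real patch additionally rolls back the first resize if the second one fails:

static int example_resize_ring_buffers(unsigned long size)
{
	int ret;

	ret = ring_buffer_resize(global_trace.buffer, size);
	if (ret < 0)
		return ret;

	/* only max-latency tracers (irqsoff, wakeup, ...) need the copy */
	if (current_trace->use_max_tr) {
		ret = ring_buffer_resize(max_tr.buffer, size);
		if (ret < 0)
			return ret;	/* the real code undoes the first resize here */
		max_tr.entries = size;
	}

	global_trace.entries = size;
	return 0;
}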
Signed-off-by: KOSAKI Motohiro LKML-Reference: <20100701104554.DA2D.A69D9226@jp.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 38 ++++++++++++++++++++++++++++++++------ kernel/trace/trace.h | 1 + kernel/trace/trace_irqsoff.c | 3 +++ kernel/trace/trace_sched_wakeup.c | 2 ++ 4 files changed, 38 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index af90429..f7488f4 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -660,6 +660,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); + if (!current_trace->use_max_tr) { + WARN_ON_ONCE(1); + return; + } arch_spin_lock(&ftrace_max_lock); tr->buffer = max_tr.buffer; @@ -686,6 +690,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); + if (!current_trace->use_max_tr) { + WARN_ON_ONCE(1); + return; + } + arch_spin_lock(&ftrace_max_lock); ftrace_disable_cpu(); @@ -2801,6 +2810,9 @@ static int tracing_resize_ring_buffer(unsigned long size) if (ret < 0) return ret; + if (!current_trace->use_max_tr) + goto out; + ret = ring_buffer_resize(max_tr.buffer, size); if (ret < 0) { int r; @@ -2828,11 +2840,14 @@ static int tracing_resize_ring_buffer(unsigned long size) return ret; } + max_tr.entries = size; + out: global_trace.entries = size; return ret; } + /** * tracing_update_buffers - used by tracing facility to expand ring buffers * @@ -2893,12 +2908,26 @@ static int tracing_set_tracer(const char *buf) trace_branch_disable(); if (current_trace && current_trace->reset) current_trace->reset(tr); - + if (current_trace && current_trace->use_max_tr) { + /* + * We don't free the ring buffer. instead, resize it because + * The max_tr ring buffer has some state (e.g. ring->clock) and + * we want preserve it. 
+ */ + ring_buffer_resize(max_tr.buffer, 1); + max_tr.entries = 1; + } destroy_trace_option_files(topts); current_trace = t; topts = create_trace_option_files(current_trace); + if (current_trace->use_max_tr) { + ret = ring_buffer_resize(max_tr.buffer, global_trace.entries); + if (ret < 0) + goto out; + max_tr.entries = global_trace.entries; + } if (t->init) { ret = tracer_init(t, tr); @@ -3480,7 +3509,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, } tracing_start(); - max_tr.entries = global_trace.entries; mutex_unlock(&trace_types_lock); return cnt; @@ -4578,16 +4606,14 @@ __init static int tracer_alloc_buffers(void) #ifdef CONFIG_TRACER_MAX_TRACE - max_tr.buffer = ring_buffer_alloc(ring_buf_size, - TRACE_BUFFER_FLAGS); + max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS); if (!max_tr.buffer) { printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); WARN_ON(1); ring_buffer_free(global_trace.buffer); goto out_free_cpumask; } - max_tr.entries = ring_buffer_size(max_tr.buffer); - WARN_ON(max_tr.entries != global_trace.entries); + max_tr.entries = 1; #endif /* Allocate the first page for all buffers */ diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 7778f06..cb629b3 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -276,6 +276,7 @@ struct tracer { struct tracer *next; int print_max; struct tracer_flags *flags; + int use_max_tr; }; diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 6fd486e..73a6b06 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -649,6 +649,7 @@ static struct tracer irqsoff_tracer __read_mostly = #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, + .use_max_tr = 1, }; # define register_irqsoff(trace) register_tracer(&trace) #else @@ -681,6 +682,7 @@ static struct tracer preemptoff_tracer __read_mostly = #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, + .use_max_tr = 1, }; # define register_preemptoff(trace) register_tracer(&trace) #else @@ -715,6 +717,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, + .use_max_tr = 1, }; # define register_preemptirqsoff(trace) register_tracer(&trace) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index c9fd5bd..4086eae6 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -382,6 +382,7 @@ static struct tracer wakeup_tracer __read_mostly = #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_wakeup, #endif + .use_max_tr = 1, }; static struct tracer wakeup_rt_tracer __read_mostly = @@ -396,6 +397,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_wakeup, #endif + .use_max_tr = 1, }; __init static int init_wakeup_tracer(void) -- cgit v1.1 From eebef74695e1498e04e5f85be9c6f84bd2e7358a Mon Sep 17 00:00:00 2001 From: Josh Hunt Date: Mon, 19 Jul 2010 12:31:16 -0700 Subject: sched: Use correct macro to display sched_child_runs_first in /proc/sched_debug The sched_child_runs_first value in /proc/sched_debug is currently displayed using a macro meant to split ns time values. This patch uses the correct macro to display it as a plain decimal value. 
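The bug is purely cosmetic: PN() treats its argument as a nanosecond value and splits it into seconds plus a fractional part, so a flag value of 1 would be rendered as something like 0.000001 rather than 1. The two macros look approximately like this in kernel/sched_debug.c (exact format strings may differ slightly):

#define SPLIT_NS(x)	nsec_high(x), nsec_low(x)

/* plain decimal: right for flags and counters */
#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))

/* seconds.microseconds: right for nanosecond time values */
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))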
Signed-off-by: Josh Hunt Cc: peterz@infradead.org Cc: juhlenko@akamai.com Cc: efault@gmx.de LKML-Reference: <1279567876-25418-1-git-send-email-johunt@akamai.com> Signed-off-by: Ingo Molnar --- kernel/sched_debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 3556539..2e1b0d1 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c @@ -332,7 +332,7 @@ static int sched_debug_show(struct seq_file *m, void *v) PN(sysctl_sched_latency); PN(sysctl_sched_min_granularity); PN(sysctl_sched_wakeup_granularity); - PN(sysctl_sched_child_runs_first); + P(sysctl_sched_child_runs_first); P(sysctl_sched_features); #undef PN #undef P -- cgit v1.1 From 24a461d537f49f9da6533d83100999ea08c6c755 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 10 Jul 2010 12:06:44 +0200 Subject: trace: strlen() return doesn't account for the NULL We need to add one to the strlen() return because of the NULL character. The type->name here generally comes from the kernel and I don't think any of them come close to being MAX_TRACER_SIZE (100) characters long so this is basically a cleanup. Signed-off-by: Dan Carpenter LKML-Reference: <20100710100644.GV19184@bicker> Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f7488f4..cacb6f0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -739,7 +739,7 @@ __acquires(kernel_lock) return -1; } - if (strlen(type->name) > MAX_TRACER_SIZE) { + if (strlen(type->name) >= MAX_TRACER_SIZE) { pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); return -1; } -- cgit v1.1 From e120153ddf8620fd0a194d301e9c5a8b28483bb5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 22 Jul 2010 14:14:25 +0200 Subject: workqueue: fix how cpu number is stored in work->data Once a work starts execution, its data contains the cpu number it was on instead of pointing to cwq. This is added by commit 7a22ad75 (workqueue: carry cpu number in work data once execution starts) to reliably determine the work was last on even if the workqueue itself was destroyed inbetween. Whether data points to a cwq or contains a cpu number was distinguished by comparing the value against PAGE_OFFSET. The assumption was that a cpu number should be below PAGE_OFFSET while a pointer to cwq should be above it. However, on architectures which use separate address spaces for user and kernel spaces, this doesn't hold as PAGE_OFFSET is zero. Fix it by using an explicit flag, WORK_STRUCT_CWQ, to mark what the data field contains. If the flag is set, it's pointing to a cwq; otherwise, it contains a cpu number. Reported on s390 and microblaze during linux-next testing. Signed-off-by: Tejun Heo Reported-by: Sachin Sant Reported-by: Michal Simek Reported-by: Martin Schwidefsky Tested-by: Martin Schwidefsky Tested-by: Michal Simek --- kernel/workqueue.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c11edc9..e5cb7fa 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -468,10 +468,9 @@ static int work_next_color(int color) } /* - * Work data points to the cwq while a work is on queue. Once - * execution starts, it points to the cpu the work was last on. This - * can be distinguished by comparing the data value against - * PAGE_OFFSET. 
+ * A work's data points to the cwq with WORK_STRUCT_CWQ set while the + * work is on queue. Once execution starts, WORK_STRUCT_CWQ is + * cleared and the work data contains the cpu number it was last on. * * set_work_{cwq|cpu}() and clear_work_data() can be used to set the * cwq, cpu or clear work->data. These functions should only be @@ -494,7 +493,7 @@ static void set_work_cwq(struct work_struct *work, unsigned long extra_flags) { set_work_data(work, (unsigned long)cwq, - WORK_STRUCT_PENDING | extra_flags); + WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags); } static void set_work_cpu(struct work_struct *work, unsigned int cpu) @@ -507,25 +506,24 @@ static void clear_work_data(struct work_struct *work) set_work_data(work, WORK_STRUCT_NO_CPU, 0); } -static inline unsigned long get_work_data(struct work_struct *work) -{ - return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK; -} - static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work) { - unsigned long data = get_work_data(work); + unsigned long data = atomic_long_read(&work->data); - return data >= PAGE_OFFSET ? (void *)data : NULL; + if (data & WORK_STRUCT_CWQ) + return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); + else + return NULL; } static struct global_cwq *get_work_gcwq(struct work_struct *work) { - unsigned long data = get_work_data(work); + unsigned long data = atomic_long_read(&work->data); unsigned int cpu; - if (data >= PAGE_OFFSET) - return ((struct cpu_workqueue_struct *)data)->gcwq; + if (data & WORK_STRUCT_CWQ) + return ((struct cpu_workqueue_struct *) + (data & WORK_STRUCT_WQ_DATA_MASK))->gcwq; cpu = data >> WORK_STRUCT_FLAG_BITS; if (cpu == WORK_CPU_NONE) @@ -3501,14 +3499,6 @@ void __init init_workqueues(void) unsigned int cpu; int i; - /* - * The pointer part of work->data is either pointing to the - * cwq or contains the cpu number the work ran last on. Make - * sure cpu number won't overflow into kernel pointer area so - * that they can be distinguished. - */ - BUILD_BUG_ON(WORK_CPU_LAST << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET); - hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ -- cgit v1.1 From 181a51f6e040d0ac006d6adaf4a031ffa440f41c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 22:09:02 +0200 Subject: slow-work: kill it slow-work doesn't have any user left. Kill it. 
Signed-off-by: Tejun Heo Acked-by: David Howells --- kernel/Makefile | 2 - kernel/slow-work-debugfs.c | 227 ---------- kernel/slow-work.c | 1068 -------------------------------------------- kernel/slow-work.h | 72 --- kernel/sysctl.c | 8 - 5 files changed, 1377 deletions(-) delete mode 100644 kernel/slow-work-debugfs.c delete mode 100644 kernel/slow-work.c delete mode 100644 kernel/slow-work.h (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index 057472f..2484ac3 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -99,8 +99,6 @@ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o -obj-$(CONFIG_SLOW_WORK) += slow-work.o -obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o diff --git a/kernel/slow-work-debugfs.c b/kernel/slow-work-debugfs.c deleted file mode 100644 index e45c436..0000000 --- a/kernel/slow-work-debugfs.c +++ /dev/null @@ -1,227 +0,0 @@ -/* Slow work debugging - * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include "slow-work.h" - -#define ITERATOR_SHIFT (BITS_PER_LONG - 4) -#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT) -#define ITERATOR_COUNTER (~ITERATOR_SELECTOR) - -void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m) -{ - seq_puts(m, "Slow-work: New thread"); -} - -/* - * Render the time mark field on a work item into a 5-char time with units plus - * a space - */ -static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) -{ - struct timespec now, diff; - - now = CURRENT_TIME; - diff = timespec_sub(now, work->mark); - - if (diff.tv_sec < 0) - seq_puts(m, " -ve "); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000) - seq_printf(m, "%3luns ", diff.tv_nsec); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000) - seq_printf(m, "%3luus ", diff.tv_nsec / 1000); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000) - seq_printf(m, "%3lums ", diff.tv_nsec / 1000000); - else if (diff.tv_sec <= 1) - seq_puts(m, " 1s "); - else if (diff.tv_sec < 60) - seq_printf(m, "%4lus ", diff.tv_sec); - else if (diff.tv_sec < 60 * 60) - seq_printf(m, "%4lum ", diff.tv_sec / 60); - else if (diff.tv_sec < 60 * 60 * 24) - seq_printf(m, "%4luh ", diff.tv_sec / 3600); - else - seq_puts(m, "exces "); -} - -/* - * Describe a slow work item for debugfs - */ -static int slow_work_runqueue_show(struct seq_file *m, void *v) -{ - struct slow_work *work; - struct list_head *p = v; - unsigned long id; - - switch ((unsigned long) v) { - case 1: - seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n"); - return 0; - case 2: - seq_puts(m, "=== ===== ================ == ===== ==========\n"); - return 0; - - case 3 ... 
3 + SLOW_WORK_THREAD_LIMIT - 1: - id = (unsigned long) v - 3; - - read_lock(&slow_work_execs_lock); - work = slow_work_execs[id]; - if (work) { - smp_read_barrier_depends(); - - seq_printf(m, "%3lu %5d %16p %2lx ", - id, slow_work_pids[id], work, work->flags); - slow_work_print_mark(m, work); - - if (work->ops->desc) - work->ops->desc(work, m); - seq_putc(m, '\n'); - } - read_unlock(&slow_work_execs_lock); - return 0; - - default: - work = list_entry(p, struct slow_work, link); - seq_printf(m, "%3s - %16p %2lx ", - work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq", - work, work->flags); - slow_work_print_mark(m, work); - - if (work->ops->desc) - work->ops->desc(work, m); - seq_putc(m, '\n'); - return 0; - } -} - -/* - * map the iterator to a work item - */ -static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos) -{ - struct list_head *p; - unsigned long count, id; - - switch (*_pos >> ITERATOR_SHIFT) { - case 0x0: - if (*_pos == 0) - *_pos = 1; - if (*_pos < 3) - return (void *)(unsigned long) *_pos; - if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT) - for (id = *_pos - 3; - id < SLOW_WORK_THREAD_LIMIT; - id++, (*_pos)++) - if (slow_work_execs[id]) - return (void *)(unsigned long) *_pos; - *_pos = 0x1UL << ITERATOR_SHIFT; - - case 0x1: - count = *_pos & ITERATOR_COUNTER; - list_for_each(p, &slow_work_queue) { - if (count == 0) - return p; - count--; - } - *_pos = 0x2UL << ITERATOR_SHIFT; - - case 0x2: - count = *_pos & ITERATOR_COUNTER; - list_for_each(p, &vslow_work_queue) { - if (count == 0) - return p; - count--; - } - *_pos = 0x3UL << ITERATOR_SHIFT; - - default: - return NULL; - } -} - -/* - * set up the iterator to start reading from the first line - */ -static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos) -{ - spin_lock_irq(&slow_work_queue_lock); - return slow_work_runqueue_index(m, _pos); -} - -/* - * move to the next line - */ -static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos) -{ - struct list_head *p = v; - unsigned long selector = *_pos >> ITERATOR_SHIFT; - - (*_pos)++; - switch (selector) { - case 0x0: - return slow_work_runqueue_index(m, _pos); - - case 0x1: - if (*_pos >> ITERATOR_SHIFT == 0x1) { - p = p->next; - if (p != &slow_work_queue) - return p; - } - *_pos = 0x2UL << ITERATOR_SHIFT; - p = &vslow_work_queue; - - case 0x2: - if (*_pos >> ITERATOR_SHIFT == 0x2) { - p = p->next; - if (p != &vslow_work_queue) - return p; - } - *_pos = 0x3UL << ITERATOR_SHIFT; - - default: - return NULL; - } -} - -/* - * clean up after reading - */ -static void slow_work_runqueue_stop(struct seq_file *m, void *v) -{ - spin_unlock_irq(&slow_work_queue_lock); -} - -static const struct seq_operations slow_work_runqueue_ops = { - .start = slow_work_runqueue_start, - .stop = slow_work_runqueue_stop, - .next = slow_work_runqueue_next, - .show = slow_work_runqueue_show, -}; - -/* - * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents - */ -static int slow_work_runqueue_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &slow_work_runqueue_ops); -} - -const struct file_operations slow_work_runqueue_fops = { - .owner = THIS_MODULE, - .open = slow_work_runqueue_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; diff --git a/kernel/slow-work.c b/kernel/slow-work.c deleted file mode 100644 index 7d3f4fa..0000000 --- a/kernel/slow-work.c +++ /dev/null @@ -1,1068 +0,0 @@ -/* Worker thread pool for slow items, such as filesystem lookups or mkdirs - * - * Copyright (C) 2008 Red Hat, Inc. 
All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - * - * See Documentation/slow-work.txt - */ - -#include -#include -#include -#include -#include -#include -#include "slow-work.h" - -static void slow_work_cull_timeout(unsigned long); -static void slow_work_oom_timeout(unsigned long); - -#ifdef CONFIG_SYSCTL -static int slow_work_min_threads_sysctl(struct ctl_table *, int, - void __user *, size_t *, loff_t *); - -static int slow_work_max_threads_sysctl(struct ctl_table *, int , - void __user *, size_t *, loff_t *); -#endif - -/* - * The pool of threads has at least min threads in it as long as someone is - * using the facility, and may have as many as max. - * - * A portion of the pool may be processing very slow operations. - */ -static unsigned slow_work_min_threads = 2; -static unsigned slow_work_max_threads = 4; -static unsigned vslow_work_proportion = 50; /* % of threads that may process - * very slow work */ - -#ifdef CONFIG_SYSCTL -static const int slow_work_min_min_threads = 2; -static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT; -static const int slow_work_min_vslow = 1; -static const int slow_work_max_vslow = 99; - -ctl_table slow_work_sysctls[] = { - { - .procname = "min-threads", - .data = &slow_work_min_threads, - .maxlen = sizeof(unsigned), - .mode = 0644, - .proc_handler = slow_work_min_threads_sysctl, - .extra1 = (void *) &slow_work_min_min_threads, - .extra2 = &slow_work_max_threads, - }, - { - .procname = "max-threads", - .data = &slow_work_max_threads, - .maxlen = sizeof(unsigned), - .mode = 0644, - .proc_handler = slow_work_max_threads_sysctl, - .extra1 = &slow_work_min_threads, - .extra2 = (void *) &slow_work_max_max_threads, - }, - { - .procname = "vslow-percentage", - .data = &vslow_work_proportion, - .maxlen = sizeof(unsigned), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = (void *) &slow_work_min_vslow, - .extra2 = (void *) &slow_work_max_vslow, - }, - {} -}; -#endif - -/* - * The active state of the thread pool - */ -static atomic_t slow_work_thread_count; -static atomic_t vslow_work_executing_count; - -static bool slow_work_may_not_start_new_thread; -static bool slow_work_cull; /* cull a thread due to lack of activity */ -static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0); -static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0); -static struct slow_work slow_work_new_thread; /* new thread starter */ - -/* - * slow work ID allocation (use slow_work_queue_lock) - */ -static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT); - -/* - * Unregistration tracking to prevent put_ref() from disappearing during module - * unload - */ -#ifdef CONFIG_MODULES -static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT]; -static struct module *slow_work_unreg_module; -static struct slow_work *slow_work_unreg_work_item; -static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); -static DEFINE_MUTEX(slow_work_unreg_sync_lock); - -static void slow_work_set_thread_processing(int id, struct slow_work *work) -{ - if (work) - slow_work_thread_processing[id] = work->owner; -} -static void slow_work_done_thread_processing(int id, struct slow_work *work) -{ - struct module *module = slow_work_thread_processing[id]; - - 
slow_work_thread_processing[id] = NULL; - smp_mb(); - if (slow_work_unreg_work_item == work || - slow_work_unreg_module == module) - wake_up_all(&slow_work_unreg_wq); -} -static void slow_work_clear_thread_processing(int id) -{ - slow_work_thread_processing[id] = NULL; -} -#else -static void slow_work_set_thread_processing(int id, struct slow_work *work) {} -static void slow_work_done_thread_processing(int id, struct slow_work *work) {} -static void slow_work_clear_thread_processing(int id) {} -#endif - -/* - * Data for tracking currently executing items for indication through /proc - */ -#ifdef CONFIG_SLOW_WORK_DEBUG -struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT]; -pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT]; -DEFINE_RWLOCK(slow_work_execs_lock); -#endif - -/* - * The queues of work items and the lock governing access to them. These are - * shared between all the CPUs. It doesn't make sense to have per-CPU queues - * as the number of threads bears no relation to the number of CPUs. - * - * There are two queues of work items: one for slow work items, and one for - * very slow work items. - */ -LIST_HEAD(slow_work_queue); -LIST_HEAD(vslow_work_queue); -DEFINE_SPINLOCK(slow_work_queue_lock); - -/* - * The following are two wait queues that get pinged when a work item is placed - * on an empty queue. These allow work items that are hogging a thread by - * sleeping in a way that could be deferred to yield their thread and enqueue - * themselves. - */ -static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation); -static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation); - -/* - * The thread controls. A variable used to signal to the threads that they - * should exit when the queue is empty, a waitqueue used by the threads to wait - * for signals, and a completion set by the last thread to exit. - */ -static bool slow_work_threads_should_exit; -static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq); -static DECLARE_COMPLETION(slow_work_last_thread_exited); - -/* - * The number of users of the thread pool and its lock. Whilst this is zero we - * have no threads hanging around, and when this reaches zero, we wait for all - * active or queued work items to complete and kill all the threads we do have. - */ -static int slow_work_user_count; -static DEFINE_MUTEX(slow_work_user_lock); - -static inline int slow_work_get_ref(struct slow_work *work) -{ - if (work->ops->get_ref) - return work->ops->get_ref(work); - - return 0; -} - -static inline void slow_work_put_ref(struct slow_work *work) -{ - if (work->ops->put_ref) - work->ops->put_ref(work); -} - -/* - * Calculate the maximum number of active threads in the pool that are - * permitted to process very slow work items. - * - * The answer is rounded up to at least 1, but may not equal or exceed the - * maximum number of the threads in the pool. This means we always have at - * least one thread that can process slow work items, and we always have at - * least one thread that won't get tied up doing so. - */ -static unsigned slow_work_calc_vsmax(void) -{ - unsigned vsmax; - - vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion; - vsmax /= 100; - vsmax = max(vsmax, 1U); - return min(vsmax, slow_work_max_threads - 1); -} - -/* - * Attempt to execute stuff queued on a slow thread. Return true if we managed - * it, false if there was nothing to do. 
- */ -static noinline bool slow_work_execute(int id) -{ - struct slow_work *work = NULL; - unsigned vsmax; - bool very_slow; - - vsmax = slow_work_calc_vsmax(); - - /* see if we can schedule a new thread to be started if we're not - * keeping up with the work */ - if (!waitqueue_active(&slow_work_thread_wq) && - (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) && - atomic_read(&slow_work_thread_count) < slow_work_max_threads && - !slow_work_may_not_start_new_thread) - slow_work_enqueue(&slow_work_new_thread); - - /* find something to execute */ - spin_lock_irq(&slow_work_queue_lock); - if (!list_empty(&vslow_work_queue) && - atomic_read(&vslow_work_executing_count) < vsmax) { - work = list_entry(vslow_work_queue.next, - struct slow_work, link); - if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags)) - BUG(); - list_del_init(&work->link); - atomic_inc(&vslow_work_executing_count); - very_slow = true; - } else if (!list_empty(&slow_work_queue)) { - work = list_entry(slow_work_queue.next, - struct slow_work, link); - if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags)) - BUG(); - list_del_init(&work->link); - very_slow = false; - } else { - very_slow = false; /* avoid the compiler warning */ - } - - slow_work_set_thread_processing(id, work); - if (work) { - slow_work_mark_time(work); - slow_work_begin_exec(id, work); - } - - spin_unlock_irq(&slow_work_queue_lock); - - if (!work) - return false; - - if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) - BUG(); - - /* don't execute if the work is in the process of being cancelled */ - if (!test_bit(SLOW_WORK_CANCELLING, &work->flags)) - work->ops->execute(work); - - if (very_slow) - atomic_dec(&vslow_work_executing_count); - clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); - - /* wake up anyone waiting for this work to be complete */ - wake_up_bit(&work->flags, SLOW_WORK_EXECUTING); - - slow_work_end_exec(id, work); - - /* if someone tried to enqueue the item whilst we were executing it, - * then it'll be left unenqueued to avoid multiple threads trying to - * execute it simultaneously - * - * there is, however, a race between us testing the pending flag and - * getting the spinlock, and between the enqueuer setting the pending - * flag and getting the spinlock, so we use a deferral bit to tell us - * if the enqueuer got there first - */ - if (test_bit(SLOW_WORK_PENDING, &work->flags)) { - spin_lock_irq(&slow_work_queue_lock); - - if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) && - test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) - goto auto_requeue; - - spin_unlock_irq(&slow_work_queue_lock); - } - - /* sort out the race between module unloading and put_ref() */ - slow_work_put_ref(work); - slow_work_done_thread_processing(id, work); - - return true; - -auto_requeue: - /* we must complete the enqueue operation - * - we transfer our ref on the item back to the appropriate queue - * - don't wake another thread up as we're awake already - */ - slow_work_mark_time(work); - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) - list_add_tail(&work->link, &vslow_work_queue); - else - list_add_tail(&work->link, &slow_work_queue); - spin_unlock_irq(&slow_work_queue_lock); - slow_work_clear_thread_processing(id); - return true; -} - -/** - * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work - * work: The work item under execution that wants to sleep - * _timeout: Scheduler sleep timeout - * - * Allow a requeueable work item to sleep on a slow-work processor thread until - * 
that thread is needed to do some other work or the sleep is interrupted by - * some other event. - * - * The caller must set up a wake up event before calling this and must have set - * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own - * condition before calling this function as no test is made here. - * - * False is returned if there is nothing on the queue; true is returned if the - * work item should be requeued - */ -bool slow_work_sleep_till_thread_needed(struct slow_work *work, - signed long *_timeout) -{ - wait_queue_head_t *wfo_wq; - struct list_head *queue; - - DEFINE_WAIT(wait); - - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { - wfo_wq = &vslow_work_queue_waits_for_occupation; - queue = &vslow_work_queue; - } else { - wfo_wq = &slow_work_queue_waits_for_occupation; - queue = &slow_work_queue; - } - - if (!list_empty(queue)) - return true; - - add_wait_queue_exclusive(wfo_wq, &wait); - if (list_empty(queue)) - *_timeout = schedule_timeout(*_timeout); - finish_wait(wfo_wq, &wait); - - return !list_empty(queue); -} -EXPORT_SYMBOL(slow_work_sleep_till_thread_needed); - -/** - * slow_work_enqueue - Schedule a slow work item for processing - * @work: The work item to queue - * - * Schedule a slow work item for processing. If the item is already undergoing - * execution, this guarantees not to re-enter the execution routine until the - * first execution finishes. - * - * The item is pinned by this function as it retains a reference to it, managed - * through the item operations. The item is unpinned once it has been - * executed. - * - * An item may hog the thread that is running it for a relatively large amount - * of time, sufficient, for example, to perform several lookup, mkdir, create - * and setxattr operations. It may sleep on I/O and may sleep to obtain locks. - * - * Conversely, if a number of items are awaiting processing, it may take some - * time before any given item is given attention. The number of threads in the - * pool may be increased to deal with demand, but only up to a limit. - * - * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in - * the very slow queue, from which only a portion of the threads will be - * allowed to pick items to execute. This ensures that very slow items won't - * overly block ones that are just ordinarily slow. 
- * - * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is - * attempted queued) - */ -int slow_work_enqueue(struct slow_work *work) -{ - wait_queue_head_t *wfo_wq; - struct list_head *queue; - unsigned long flags; - int ret; - - if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) - return -ECANCELED; - - BUG_ON(slow_work_user_count <= 0); - BUG_ON(!work); - BUG_ON(!work->ops); - - /* when honouring an enqueue request, we only promise that we will run - * the work function in the future; we do not promise to run it once - * per enqueue request - * - * we use the PENDING bit to merge together repeat requests without - * having to disable IRQs and take the spinlock, whilst still - * maintaining our promise - */ - if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { - wfo_wq = &vslow_work_queue_waits_for_occupation; - queue = &vslow_work_queue; - } else { - wfo_wq = &slow_work_queue_waits_for_occupation; - queue = &slow_work_queue; - } - - spin_lock_irqsave(&slow_work_queue_lock, flags); - - if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags))) - goto cancelled; - - /* we promise that we will not attempt to execute the work - * function in more than one thread simultaneously - * - * this, however, leaves us with a problem if we're asked to - * enqueue the work whilst someone is executing the work - * function as simply queueing the work immediately means that - * another thread may try executing it whilst it is already - * under execution - * - * to deal with this, we set the ENQ_DEFERRED bit instead of - * enqueueing, and the thread currently executing the work - * function will enqueue the work item when the work function - * returns and it has cleared the EXECUTING bit - */ - if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { - set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); - } else { - ret = slow_work_get_ref(work); - if (ret < 0) - goto failed; - slow_work_mark_time(work); - list_add_tail(&work->link, queue); - wake_up(&slow_work_thread_wq); - - /* if someone who could be requeued is sleeping on a - * thread, then ask them to yield their thread */ - if (work->link.prev == queue) - wake_up(wfo_wq); - } - - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - } - return 0; - -cancelled: - ret = -ECANCELED; -failed: - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - return ret; -} -EXPORT_SYMBOL(slow_work_enqueue); - -static int slow_work_wait(void *word) -{ - schedule(); - return 0; -} - -/** - * slow_work_cancel - Cancel a slow work item - * @work: The work item to cancel - * - * This function will cancel a previously enqueued work item. If we cannot - * cancel the work item, it is guarenteed to have run when this function - * returns. 
- */ -void slow_work_cancel(struct slow_work *work) -{ - bool wait = true, put = false; - - set_bit(SLOW_WORK_CANCELLING, &work->flags); - smp_mb(); - - /* if the work item is a delayed work item with an active timer, we - * need to wait for the timer to finish _before_ getting the spinlock, - * lest we deadlock against the timer routine - * - * the timer routine will leave DELAYED set if it notices the - * CANCELLING flag in time - */ - if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { - struct delayed_slow_work *dwork = - container_of(work, struct delayed_slow_work, work); - del_timer_sync(&dwork->timer); - } - - spin_lock_irq(&slow_work_queue_lock); - - if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { - /* the timer routine aborted or never happened, so we are left - * holding the timer's reference on the item and should just - * drop the pending flag and wait for any ongoing execution to - * finish */ - struct delayed_slow_work *dwork = - container_of(work, struct delayed_slow_work, work); - - BUG_ON(timer_pending(&dwork->timer)); - BUG_ON(!list_empty(&work->link)); - - clear_bit(SLOW_WORK_DELAYED, &work->flags); - put = true; - clear_bit(SLOW_WORK_PENDING, &work->flags); - - } else if (test_bit(SLOW_WORK_PENDING, &work->flags) && - !list_empty(&work->link)) { - /* the link in the pending queue holds a reference on the item - * that we will need to release */ - list_del_init(&work->link); - wait = false; - put = true; - clear_bit(SLOW_WORK_PENDING, &work->flags); - - } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) { - /* the executor is holding our only reference on the item, so - * we merely need to wait for it to finish executing */ - clear_bit(SLOW_WORK_PENDING, &work->flags); - } - - spin_unlock_irq(&slow_work_queue_lock); - - /* the EXECUTING flag is set by the executor whilst the spinlock is set - * and before the item is dequeued - so assuming the above doesn't - * actually dequeue it, simply waiting for the EXECUTING flag to be - * released here should be sufficient */ - if (wait) - wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait, - TASK_UNINTERRUPTIBLE); - - clear_bit(SLOW_WORK_CANCELLING, &work->flags); - if (put) - slow_work_put_ref(work); -} -EXPORT_SYMBOL(slow_work_cancel); - -/* - * Handle expiry of the delay timer, indicating that a delayed slow work item - * should now be queued if not cancelled - */ -static void delayed_slow_work_timer(unsigned long data) -{ - wait_queue_head_t *wfo_wq; - struct list_head *queue; - struct slow_work *work = (struct slow_work *) data; - unsigned long flags; - bool queued = false, put = false, first = false; - - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { - wfo_wq = &vslow_work_queue_waits_for_occupation; - queue = &vslow_work_queue; - } else { - wfo_wq = &slow_work_queue_waits_for_occupation; - queue = &slow_work_queue; - } - - spin_lock_irqsave(&slow_work_queue_lock, flags); - if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) { - clear_bit(SLOW_WORK_DELAYED, &work->flags); - - if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { - /* we discard the reference the timer was holding in - * favour of the one the executor holds */ - set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); - put = true; - } else { - slow_work_mark_time(work); - list_add_tail(&work->link, queue); - queued = true; - if (work->link.prev == queue) - first = true; - } - } - - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - if (put) - slow_work_put_ref(work); - if (first) - wake_up(wfo_wq); - if (queued) - 
wake_up(&slow_work_thread_wq); -} - -/** - * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing - * @dwork: The delayed work item to queue - * @delay: When to start executing the work, in jiffies from now - * - * This is similar to slow_work_enqueue(), but it adds a delay before the work - * is actually queued for processing. - * - * The item can have delayed processing requested on it whilst it is being - * executed. The delay will begin immediately, and if it expires before the - * item finishes executing, the item will be placed back on the queue when it - * has done executing. - */ -int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, - unsigned long delay) -{ - struct slow_work *work = &dwork->work; - unsigned long flags; - int ret; - - if (delay == 0) - return slow_work_enqueue(&dwork->work); - - BUG_ON(slow_work_user_count <= 0); - BUG_ON(!work); - BUG_ON(!work->ops); - - if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) - return -ECANCELED; - - if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { - spin_lock_irqsave(&slow_work_queue_lock, flags); - - if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) - goto cancelled; - - /* the timer holds a reference whilst it is pending */ - ret = slow_work_get_ref(work); - if (ret < 0) - goto cant_get_ref; - - if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags)) - BUG(); - dwork->timer.expires = jiffies + delay; - dwork->timer.data = (unsigned long) work; - dwork->timer.function = delayed_slow_work_timer; - add_timer(&dwork->timer); - - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - } - - return 0; - -cancelled: - ret = -ECANCELED; -cant_get_ref: - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - return ret; -} -EXPORT_SYMBOL(delayed_slow_work_enqueue); - -/* - * Schedule a cull of the thread pool at some time in the near future - */ -static void slow_work_schedule_cull(void) -{ - mod_timer(&slow_work_cull_timer, - round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT)); -} - -/* - * Worker thread culling algorithm - */ -static bool slow_work_cull_thread(void) -{ - unsigned long flags; - bool do_cull = false; - - spin_lock_irqsave(&slow_work_queue_lock, flags); - - if (slow_work_cull) { - slow_work_cull = false; - - if (list_empty(&slow_work_queue) && - list_empty(&vslow_work_queue) && - atomic_read(&slow_work_thread_count) > - slow_work_min_threads) { - slow_work_schedule_cull(); - do_cull = true; - } - } - - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - return do_cull; -} - -/* - * Determine if there is slow work available for dispatch - */ -static inline bool slow_work_available(int vsmax) -{ - return !list_empty(&slow_work_queue) || - (!list_empty(&vslow_work_queue) && - atomic_read(&vslow_work_executing_count) < vsmax); -} - -/* - * Worker thread dispatcher - */ -static int slow_work_thread(void *_data) -{ - int vsmax, id; - - DEFINE_WAIT(wait); - - set_freezable(); - set_user_nice(current, -5); - - /* allocate ourselves an ID */ - spin_lock_irq(&slow_work_queue_lock); - id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT); - BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT); - __set_bit(id, slow_work_ids); - slow_work_set_thread_pid(id, current->pid); - spin_unlock_irq(&slow_work_queue_lock); - - sprintf(current->comm, "kslowd%03u", id); - - for (;;) { - vsmax = vslow_work_proportion; - vsmax *= atomic_read(&slow_work_thread_count); - vsmax /= 100; - - prepare_to_wait_exclusive(&slow_work_thread_wq, &wait, - TASK_INTERRUPTIBLE); - if (!freezing(current) && - 
!slow_work_threads_should_exit && - !slow_work_available(vsmax) && - !slow_work_cull) - schedule(); - finish_wait(&slow_work_thread_wq, &wait); - - try_to_freeze(); - - vsmax = vslow_work_proportion; - vsmax *= atomic_read(&slow_work_thread_count); - vsmax /= 100; - - if (slow_work_available(vsmax) && slow_work_execute(id)) { - cond_resched(); - if (list_empty(&slow_work_queue) && - list_empty(&vslow_work_queue) && - atomic_read(&slow_work_thread_count) > - slow_work_min_threads) - slow_work_schedule_cull(); - continue; - } - - if (slow_work_threads_should_exit) - break; - - if (slow_work_cull && slow_work_cull_thread()) - break; - } - - spin_lock_irq(&slow_work_queue_lock); - slow_work_set_thread_pid(id, 0); - __clear_bit(id, slow_work_ids); - spin_unlock_irq(&slow_work_queue_lock); - - if (atomic_dec_and_test(&slow_work_thread_count)) - complete_and_exit(&slow_work_last_thread_exited, 0); - return 0; -} - -/* - * Handle thread cull timer expiration - */ -static void slow_work_cull_timeout(unsigned long data) -{ - slow_work_cull = true; - wake_up(&slow_work_thread_wq); -} - -/* - * Start a new slow work thread - */ -static void slow_work_new_thread_execute(struct slow_work *work) -{ - struct task_struct *p; - - if (slow_work_threads_should_exit) - return; - - if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads) - return; - - if (!mutex_trylock(&slow_work_user_lock)) - return; - - slow_work_may_not_start_new_thread = true; - atomic_inc(&slow_work_thread_count); - p = kthread_run(slow_work_thread, NULL, "kslowd"); - if (IS_ERR(p)) { - printk(KERN_DEBUG "Slow work thread pool: OOM\n"); - if (atomic_dec_and_test(&slow_work_thread_count)) - BUG(); /* we're running on a slow work thread... */ - mod_timer(&slow_work_oom_timer, - round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT)); - } else { - /* ratelimit the starting of new threads */ - mod_timer(&slow_work_oom_timer, jiffies + 1); - } - - mutex_unlock(&slow_work_user_lock); -} - -static const struct slow_work_ops slow_work_new_thread_ops = { - .owner = THIS_MODULE, - .execute = slow_work_new_thread_execute, -#ifdef CONFIG_SLOW_WORK_DEBUG - .desc = slow_work_new_thread_desc, -#endif -}; - -/* - * post-OOM new thread start suppression expiration - */ -static void slow_work_oom_timeout(unsigned long data) -{ - slow_work_may_not_start_new_thread = false; -} - -#ifdef CONFIG_SYSCTL -/* - * Handle adjustment of the minimum number of threads - */ -static int slow_work_min_threads_sysctl(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - int n; - - if (ret == 0) { - mutex_lock(&slow_work_user_lock); - if (slow_work_user_count > 0) { - /* see if we need to start or stop threads */ - n = atomic_read(&slow_work_thread_count) - - slow_work_min_threads; - - if (n < 0 && !slow_work_may_not_start_new_thread) - slow_work_enqueue(&slow_work_new_thread); - else if (n > 0) - slow_work_schedule_cull(); - } - mutex_unlock(&slow_work_user_lock); - } - - return ret; -} - -/* - * Handle adjustment of the maximum number of threads - */ -static int slow_work_max_threads_sysctl(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - int n; - - if (ret == 0) { - mutex_lock(&slow_work_user_lock); - if (slow_work_user_count > 0) { - /* see if we need to stop threads */ - n = slow_work_max_threads - - atomic_read(&slow_work_thread_count); - - if (n < 
0) - slow_work_schedule_cull(); - } - mutex_unlock(&slow_work_user_lock); - } - - return ret; -} -#endif /* CONFIG_SYSCTL */ - -/** - * slow_work_register_user - Register a user of the facility - * @module: The module about to make use of the facility - * - * Register a user of the facility, starting up the initial threads if there - * aren't any other users at this point. This will return 0 if successful, or - * an error if not. - */ -int slow_work_register_user(struct module *module) -{ - struct task_struct *p; - int loop; - - mutex_lock(&slow_work_user_lock); - - if (slow_work_user_count == 0) { - printk(KERN_NOTICE "Slow work thread pool: Starting up\n"); - init_completion(&slow_work_last_thread_exited); - - slow_work_threads_should_exit = false; - slow_work_init(&slow_work_new_thread, - &slow_work_new_thread_ops); - slow_work_may_not_start_new_thread = false; - slow_work_cull = false; - - /* start the minimum number of threads */ - for (loop = 0; loop < slow_work_min_threads; loop++) { - atomic_inc(&slow_work_thread_count); - p = kthread_run(slow_work_thread, NULL, "kslowd"); - if (IS_ERR(p)) - goto error; - } - printk(KERN_NOTICE "Slow work thread pool: Ready\n"); - } - - slow_work_user_count++; - mutex_unlock(&slow_work_user_lock); - return 0; - -error: - if (atomic_dec_and_test(&slow_work_thread_count)) - complete(&slow_work_last_thread_exited); - if (loop > 0) { - printk(KERN_ERR "Slow work thread pool:" - " Aborting startup on ENOMEM\n"); - slow_work_threads_should_exit = true; - wake_up_all(&slow_work_thread_wq); - wait_for_completion(&slow_work_last_thread_exited); - printk(KERN_ERR "Slow work thread pool: Aborted\n"); - } - mutex_unlock(&slow_work_user_lock); - return PTR_ERR(p); -} -EXPORT_SYMBOL(slow_work_register_user); - -/* - * wait for all outstanding items from the calling module to complete - * - note that more items may be queued whilst we're waiting - */ -static void slow_work_wait_for_items(struct module *module) -{ -#ifdef CONFIG_MODULES - DECLARE_WAITQUEUE(myself, current); - struct slow_work *work; - int loop; - - mutex_lock(&slow_work_unreg_sync_lock); - add_wait_queue(&slow_work_unreg_wq, &myself); - - for (;;) { - spin_lock_irq(&slow_work_queue_lock); - - /* first of all, we wait for the last queued item in each list - * to be processed */ - list_for_each_entry_reverse(work, &vslow_work_queue, link) { - if (work->owner == module) { - set_current_state(TASK_UNINTERRUPTIBLE); - slow_work_unreg_work_item = work; - goto do_wait; - } - } - list_for_each_entry_reverse(work, &slow_work_queue, link) { - if (work->owner == module) { - set_current_state(TASK_UNINTERRUPTIBLE); - slow_work_unreg_work_item = work; - goto do_wait; - } - } - - /* then we wait for the items being processed to finish */ - slow_work_unreg_module = module; - smp_mb(); - for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) { - if (slow_work_thread_processing[loop] == module) - goto do_wait; - } - spin_unlock_irq(&slow_work_queue_lock); - break; /* okay, we're done */ - - do_wait: - spin_unlock_irq(&slow_work_queue_lock); - schedule(); - slow_work_unreg_work_item = NULL; - slow_work_unreg_module = NULL; - } - - remove_wait_queue(&slow_work_unreg_wq, &myself); - mutex_unlock(&slow_work_unreg_sync_lock); -#endif /* CONFIG_MODULES */ -} - -/** - * slow_work_unregister_user - Unregister a user of the facility - * @module: The module whose items should be cleared - * - * Unregister a user of the facility, killing all the threads if this was the - * last one. 
- * - * This waits for all the work items belonging to the nominated module to go - * away before proceeding. - */ -void slow_work_unregister_user(struct module *module) -{ - /* first of all, wait for all outstanding items from the calling module - * to complete */ - if (module) - slow_work_wait_for_items(module); - - /* then we can actually go about shutting down the facility if need - * be */ - mutex_lock(&slow_work_user_lock); - - BUG_ON(slow_work_user_count <= 0); - - slow_work_user_count--; - if (slow_work_user_count == 0) { - printk(KERN_NOTICE "Slow work thread pool: Shutting down\n"); - slow_work_threads_should_exit = true; - del_timer_sync(&slow_work_cull_timer); - del_timer_sync(&slow_work_oom_timer); - wake_up_all(&slow_work_thread_wq); - wait_for_completion(&slow_work_last_thread_exited); - printk(KERN_NOTICE "Slow work thread pool:" - " Shut down complete\n"); - } - - mutex_unlock(&slow_work_user_lock); -} -EXPORT_SYMBOL(slow_work_unregister_user); - -/* - * Initialise the slow work facility - */ -static int __init init_slow_work(void) -{ - unsigned nr_cpus = num_possible_cpus(); - - if (slow_work_max_threads < nr_cpus) - slow_work_max_threads = nr_cpus; -#ifdef CONFIG_SYSCTL - if (slow_work_max_max_threads < nr_cpus * 2) - slow_work_max_max_threads = nr_cpus * 2; -#endif -#ifdef CONFIG_SLOW_WORK_DEBUG - { - struct dentry *dbdir; - - dbdir = debugfs_create_dir("slow_work", NULL); - if (dbdir && !IS_ERR(dbdir)) - debugfs_create_file("runqueue", S_IFREG | 0400, dbdir, - NULL, &slow_work_runqueue_fops); - } -#endif - return 0; -} - -subsys_initcall(init_slow_work); diff --git a/kernel/slow-work.h b/kernel/slow-work.h deleted file mode 100644 index a29ebd1..0000000 --- a/kernel/slow-work.h +++ /dev/null @@ -1,72 +0,0 @@ -/* Slow work private definitions - * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of - * things to do */ -#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after - * OOM */ - -#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */ - -/* - * slow-work.c - */ -#ifdef CONFIG_SLOW_WORK_DEBUG -extern struct slow_work *slow_work_execs[]; -extern pid_t slow_work_pids[]; -extern rwlock_t slow_work_execs_lock; -#endif - -extern struct list_head slow_work_queue; -extern struct list_head vslow_work_queue; -extern spinlock_t slow_work_queue_lock; - -/* - * slow-work-debugfs.c - */ -#ifdef CONFIG_SLOW_WORK_DEBUG -extern const struct file_operations slow_work_runqueue_fops; - -extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); -#endif - -/* - * Helper functions - */ -static inline void slow_work_set_thread_pid(int id, pid_t pid) -{ -#ifdef CONFIG_SLOW_WORK_DEBUG - slow_work_pids[id] = pid; -#endif -} - -static inline void slow_work_mark_time(struct slow_work *work) -{ -#ifdef CONFIG_SLOW_WORK_DEBUG - work->mark = CURRENT_TIME; -#endif -} - -static inline void slow_work_begin_exec(int id, struct slow_work *work) -{ -#ifdef CONFIG_SLOW_WORK_DEBUG - slow_work_execs[id] = work; -#endif -} - -static inline void slow_work_end_exec(int id, struct slow_work *work) -{ -#ifdef CONFIG_SLOW_WORK_DEBUG - write_lock(&slow_work_execs_lock); - slow_work_execs[id] = NULL; - write_unlock(&slow_work_execs_lock); -#endif -} diff --git a/kernel/sysctl.c b/kernel/sysctl.c index d24f761f..5821365 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include @@ -906,13 +905,6 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif -#ifdef CONFIG_SLOW_WORK - { - .procname = "slow-work", - .mode = 0555, - .child = slow_work_sysctls, - }, -#endif #ifdef CONFIG_PERF_EVENTS { .procname = "perf_event_paranoid", -- cgit v1.1 From 866e26115cba6b59cec669b6307599e3e4440491 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 20 Jul 2010 15:23:15 -0700 Subject: timers: Document meaning of deferrable timer Steal some text from 6e453a67510 "Add support for deferrable timers". A reader shouldn't have to dig through the git logs for the basic description of a deferrable timer. Signed-off-by: J. Bruce Fields Cc: johnstul@us.ibm.com Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner --- kernel/timer.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index ee305c8..ce98685 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -90,8 +90,13 @@ static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases; /* * Note that all tvec_bases are 2 byte aligned and lower bit of - * base in timer_list is guaranteed to be zero. Use the LSB for - * the new flag to indicate whether the timer is deferrable + * base in timer_list is guaranteed to be zero. Use the LSB to + * indicate whether the timer is deferrable. + * + * A deferrable timer will work normally when the system is busy, but + * will not cause a CPU to come out of idle just to service it; instead, + * the timer will be serviced when the CPU eventually wakes up with a + * subsequent non-deferrable timer. 
*/ #define TBASE_DEFERRABLE_FLAG (0x1) -- cgit v1.1 From 22b8f15c2f7130bb0386f548428df2ffd4e81903 Mon Sep 17 00:00:00 2001 From: Patrick Pannuto Date: Mon, 19 Jul 2010 15:09:26 -0700 Subject: timer: Added usleep[_range] timer usleep[_range] are finer precision implementations of msleep and are designed to be drop-in replacements for udelay where a precise sleep / busy-wait is unnecessary. They also allow an easy interface to specify slack when a precise (ish) wakeup is unnecessary to help minimize wakeups Signed-off-by: Patrick Pannuto Cc: akinobu.mita@gmail.com Cc: sboyd@codeaurora.org Acked-by: Arjan van de Ven LKML-Reference: <4C44CDD2.1070708@codeaurora.org> Signed-off-by: Thomas Gleixner --- kernel/timer.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index ce98685..f110f24 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1755,3 +1755,25 @@ unsigned long msleep_interruptible(unsigned int msecs) } EXPORT_SYMBOL(msleep_interruptible); + +static int __sched do_usleep_range(unsigned long min, unsigned long max) +{ + ktime_t kmin; + unsigned long delta; + + kmin = ktime_set(0, min * NSEC_PER_USEC); + delta = max - min; + return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL); +} + +/** + * usleep_range - Drop in replacement for udelay where wakeup is flexible + * @min: Minimum time in usecs to sleep + * @max: Maximum time in usecs to sleep + */ +void usleep_range(unsigned long min, unsigned long max) +{ + __set_current_state(TASK_UNINTERRUPTIBLE); + do_usleep_range(min, max); +} +EXPORT_SYMBOL(usleep_range); -- cgit v1.1 From 2b08de0073a5697cf84d6f448d6dbc6cf02fc6b5 Mon Sep 17 00:00:00 2001 From: Andrey Vagin Date: Tue, 20 Jul 2010 15:23:14 -0700 Subject: posix_timer: Move copy_to_user(created_timer_id) down in timer_create() According to Oleg Nesterov: We can move copy_to_user(created_timer_id) down after "if (timer_event_spec)" block too. (but before CLOCK_DISPATCH(), of course). Signed-off-by: Andrey Vagin Cc: Oleg Nesterov Cc: Pavel Emelyanov Cc: Stanislaw Gruszka Cc: Andrey Vagin Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner --- kernel/posix-timers.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index ad72342..9ca4973 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -560,11 +560,6 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, new_timer->it_clock = which_clock; new_timer->it_overrun = -1; - if (copy_to_user(created_timer_id, - &new_timer_id, sizeof (new_timer_id))) { - error = -EFAULT; - goto out; - } if (timer_event_spec) { if (copy_from_user(&event, timer_event_spec, sizeof (event))) { error = -EFAULT; @@ -590,6 +585,12 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, new_timer->sigq->info.si_tid = new_timer->it_id; new_timer->sigq->info.si_code = SI_TIMER; + if (copy_to_user(created_timer_id, + &new_timer_id, sizeof (new_timer_id))) { + error = -EFAULT; + goto out; + } + error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer)); if (error) goto out; -- cgit v1.1 From fad3a906d324c02b3c25ef51f702384154089846 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 20 Jul 2010 08:48:34 +0200 Subject: padata: Fix cpu index counting The counting of the cpu index got lost with a recent commit. This patch restores it. This fixes a hang of the parallel worker threads on cpu hotplug. 
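For context, the loop being repaired looks roughly like the sketch below. It is a simplified reading of padata_init_pqueues() built from the hunk that follows; the pqueue type name and the exact iteration construct are filled in from memory and may differ slightly in the tree.

static void padata_init_pqueues(struct parallel_data *pd)
{
	struct padata_parallel_queue *pqueue;
	int cpu_index = 0, cpu;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;		/* the increment this patch restores */

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
	}
}

Without the increment every per-cpu queue ends up with cpu_index 0, which is presumably what broke the reorder bookkeeping and left the parallel workers hung across hotplug.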
Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 526f9ea..4287868 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -408,6 +408,7 @@ static void padata_init_pqueues(struct parallel_data *pd) pqueue = per_cpu_ptr(pd->pqueue, cpu); pqueue->pd = pd; pqueue->cpu_index = cpu_index; + cpu_index++; __padata_list_init(&pqueue->reorder); __padata_list_init(&pqueue->parallel); -- cgit v1.1 From b89661dff525a46edb7ee8a4423b5212068c05c0 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 20 Jul 2010 08:49:20 +0200 Subject: padata: Allocate cpumask dependend recources in any case The cpumask separation work assumes the cpumask dependend recources present regardless of valid or invalid cpumasks. With this patch we allocate the cpumask dependend recources in any case. This fixes two NULL pointer dereference crashes in padata_replace and in padata_get_cpumask. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 4287868..6a51945 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -417,7 +417,7 @@ static void padata_init_pqueues(struct parallel_data *pd) } num_cpus = cpumask_weight(pd->cpumask.pcpu); - pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; + pd->max_seq_nr = num_cpus ? (MAX_SEQ_NR / num_cpus) * num_cpus - 1 : 0; } /* Allocate and initialize the internal cpumask dependend resources. */ @@ -527,21 +527,19 @@ static void padata_replace(struct padata_instance *pinst, rcu_assign_pointer(pinst->pd, pd_new); synchronize_rcu(); - if (!pd_old) - goto out; - padata_flush_queues(pd_old); if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu)) notification_mask |= PADATA_CPU_PARALLEL; if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu)) notification_mask |= PADATA_CPU_SERIAL; + padata_flush_queues(pd_old); padata_free_pd(pd_old); + if (notification_mask) blocking_notifier_call_chain(&pinst->cpumask_change_notifier, notification_mask, pinst); -out: pinst->flags &= ~PADATA_RESET; } @@ -673,6 +671,7 @@ int __padata_set_cpumasks(struct padata_instance *pinst, struct parallel_data *pd = NULL; mutex_lock(&pinst->lock); + get_online_cpus(); valid = padata_validate_cpumask(pinst, pcpumask); if (!valid) { @@ -681,20 +680,16 @@ int __padata_set_cpumasks(struct padata_instance *pinst, } valid = padata_validate_cpumask(pinst, cbcpumask); - if (!valid) { + if (!valid) __padata_stop(pinst); - goto out_replace; - } - - get_online_cpus(); +out_replace: pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); if (!pd) { err = -ENOMEM; goto out; } -out_replace: cpumask_copy(pinst->cpumask.pcpu, pcpumask); cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); @@ -705,7 +700,6 @@ out_replace: out: put_online_cpus(); - mutex_unlock(&pinst->lock); return err; @@ -776,11 +770,8 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) if (cpumask_test_cpu(cpu, cpu_online_mask)) { if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || - !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) { + !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) __padata_stop(pinst); - padata_replace(pinst, pd); - goto out; - } pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, pinst->cpumask.cbcpu); @@ -790,7 +781,6 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int 
cpu) padata_replace(pinst, pd); } -out: return 0; } -- cgit v1.1 From 7424713b83587006da72da84d7922471e366faba Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 20 Jul 2010 08:51:25 +0200 Subject: padata: Check for valid cpumasks Now that we allow to change the cpumasks from userspace, we have to check for valid cpumasks in padata_do_parallel. This patch adds the necessary check. This fixes a division by zero crash if the parallel cpumask contains no active cpu. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 6a51945..7f895e2 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -114,7 +114,7 @@ int padata_do_parallel(struct padata_instance *pinst, pd = rcu_dereference(pinst->pd); err = -EINVAL; - if (!(pinst->flags & PADATA_INIT)) + if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID) goto out; if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) -- cgit v1.1 From ce3bf7ab22527183634a76512d9854a38615e4d5 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 13 Jul 2010 17:56:19 -0700 Subject: time: Implement timespec_add After accidentally misusing timespec_add_safe, I wanted to make sure we don't accidently trip over that issue again, so I created a simple timespec_add() function which we can use to replace the instances of timespec_add_safe() that don't want the overflow detection. Signed-off-by: John Stultz LKML-Reference: <1279068988-21864-3-git-send-email-johnstul@us.ibm.com> Signed-off-by: Thomas Gleixner --- kernel/time/timekeeping.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index caf8d4d..623fe3d 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -579,9 +579,9 @@ static int timekeeping_resume(struct sys_device *dev) if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { ts = timespec_sub(ts, timekeeping_suspend_time); - xtime = timespec_add_safe(xtime, ts); + xtime = timespec_add(xtime, ts); wall_to_monotonic = timespec_sub(wall_to_monotonic, ts); - total_sleep_time = timespec_add_safe(total_sleep_time, ts); + total_sleep_time = timespec_add(total_sleep_time, ts); } /* re-base the last cycle value */ timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); @@ -887,7 +887,7 @@ EXPORT_SYMBOL_GPL(getboottime); */ void monotonic_to_bootbased(struct timespec *ts) { - *ts = timespec_add_safe(*ts, total_sleep_time); + *ts = timespec_add(*ts, total_sleep_time); } EXPORT_SYMBOL_GPL(monotonic_to_bootbased); -- cgit v1.1 From 592913ecb87a9e06f98ddb55b298f1a66bf94c6b Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 13 Jul 2010 17:56:20 -0700 Subject: time: Kill off CONFIG_GENERIC_TIME Now that all arches have been converted over to use generic time via clocksources or arch_gettimeoffset(), we can remove the GENERIC_TIME config option and simplify the generic code. 
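For readers who have not followed that conversion: "generic time via clocksources" means the architecture (or one of its timer drivers) hands the core a struct clocksource and lets the timekeeping code do the rest. A minimal, hedged sketch of that path is below; the MMIO counter, the 24 MHz rate and all example_* names are invented, only the clocksource API itself is taken from this series.

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>

static void __iomem *example_counter_base;	/* would come from ioremap() */

static cycle_t example_cs_read(struct clocksource *cs)
{
	return (cycle_t)readl(example_counter_base);
}

static struct clocksource example_cs = {
	.name	= "example-counter",
	.rating	= 300,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_cs_init(void)
{
	/* helper referenced by the clocksource.c hunks later in this series */
	return clocksource_register_hz(&example_cs, 24000000);
}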
Signed-off-by: John Stultz LKML-Reference: <1279068988-21864-4-git-send-email-johnstul@us.ibm.com> Signed-off-by: Thomas Gleixner --- kernel/time.c | 16 -------------- kernel/time/Kconfig | 4 ++-- kernel/time/clocksource.c | 4 ++-- kernel/time/timekeeping.c | 55 ++++------------------------------------------- kernel/trace/Kconfig | 4 ++-- 5 files changed, 10 insertions(+), 73 deletions(-) (limited to 'kernel') diff --git a/kernel/time.c b/kernel/time.c index 848b1c2..ba9b338 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -300,22 +300,6 @@ struct timespec timespec_trunc(struct timespec t, unsigned gran) } EXPORT_SYMBOL(timespec_trunc); -#ifndef CONFIG_GENERIC_TIME -/* - * Simulate gettimeofday using do_gettimeofday which only allows a timeval - * and therefore only yields usec accuracy - */ -void getnstimeofday(struct timespec *tv) -{ - struct timeval x; - - do_gettimeofday(&x); - tv->tv_sec = x.tv_sec; - tv->tv_nsec = x.tv_usec * NSEC_PER_USEC; -} -EXPORT_SYMBOL_GPL(getnstimeofday); -#endif - /* Converts Gregorian date to seconds since 1970-01-01 00:00:00. * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index 95ed429..f06a8a3 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig @@ -6,7 +6,7 @@ config TICK_ONESHOT config NO_HZ bool "Tickless System (Dynamic Ticks)" - depends on GENERIC_TIME && GENERIC_CLOCKEVENTS + depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS select TICK_ONESHOT help This option enables a tickless system: timer interrupts will @@ -15,7 +15,7 @@ config NO_HZ config HIGH_RES_TIMERS bool "High Resolution Timer Support" - depends on GENERIC_TIME && GENERIC_CLOCKEVENTS + depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS select TICK_ONESHOT help This option enables high resolution timer support. 
If your diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index f08e99c..c543d21 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -531,7 +531,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs) return max_nsecs - (max_nsecs >> 5); } -#ifdef CONFIG_GENERIC_TIME +#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET /** * clocksource_select - Select the best clocksource available @@ -577,7 +577,7 @@ static void clocksource_select(void) } } -#else /* CONFIG_GENERIC_TIME */ +#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */ static inline void clocksource_select(void) { } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 623fe3d..73edd40 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -173,8 +173,6 @@ void timekeeping_leap_insert(int leapsecond) update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); } -#ifdef CONFIG_GENERIC_TIME - /** * timekeeping_forward_now - update clock to the current time * @@ -376,52 +374,6 @@ void timekeeping_notify(struct clocksource *clock) tick_clock_notify(); } -#else /* GENERIC_TIME */ - -static inline void timekeeping_forward_now(void) { } - -/** - * ktime_get - get the monotonic time in ktime_t format - * - * returns the time in ktime_t format - */ -ktime_t ktime_get(void) -{ - struct timespec now; - - ktime_get_ts(&now); - - return timespec_to_ktime(now); -} -EXPORT_SYMBOL_GPL(ktime_get); - -/** - * ktime_get_ts - get the monotonic clock in timespec format - * @ts: pointer to timespec variable - * - * The function calculates the monotonic clock from the realtime - * clock and the wall_to_monotonic offset and stores the result - * in normalized timespec format in the variable pointed to by @ts. - */ -void ktime_get_ts(struct timespec *ts) -{ - struct timespec tomono; - unsigned long seq; - - do { - seq = read_seqbegin(&xtime_lock); - getnstimeofday(ts); - tomono = wall_to_monotonic; - - } while (read_seqretry(&xtime_lock, seq)); - - set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, - ts->tv_nsec + tomono.tv_nsec); -} -EXPORT_SYMBOL_GPL(ktime_get_ts); - -#endif /* !GENERIC_TIME */ - /** * ktime_get_real - get the real (wall-) time in ktime_t format * @@ -784,10 +736,11 @@ void update_wall_time(void) return; clock = timekeeper.clock; -#ifdef CONFIG_GENERIC_TIME - offset = (clock->read(clock) - clock->cycle_last) & clock->mask; -#else + +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET offset = timekeeper.cycle_interval; +#else + offset = (clock->read(clock) - clock->cycle_last) & clock->mask; #endif timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift; diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 8b1797c..7531dda 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -153,7 +153,7 @@ config IRQSOFF_TRACER bool "Interrupts-off Latency Tracer" default n depends on TRACE_IRQFLAGS_SUPPORT - depends on GENERIC_TIME + depends on !ARCH_USES_GETTIMEOFFSET select TRACE_IRQFLAGS select GENERIC_TRACER select TRACER_MAX_TRACE @@ -175,7 +175,7 @@ config IRQSOFF_TRACER config PREEMPT_TRACER bool "Preemption-off Latency Tracer" default n - depends on GENERIC_TIME + depends on !ARCH_USES_GETTIMEOFFSET depends on PREEMPT select GENERIC_TRACER select TRACER_MAX_TRACE -- cgit v1.1 From 7615856ebfee52b080c22d263ca4debbd0df0ac1 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 13 Jul 2010 17:56:23 -0700 Subject: timkeeping: Fix update_vsyscall to provide wall_to_monotonic offset update_vsyscall() did not provide the wall_to_monotoinc offset, so arch specific 
implementations tend to reference wall_to_monotonic directly. This limits future cleanups in the timekeeping core, so this patch fixes the update_vsyscall interface to provide wall_to_monotonic, allowing wall_to_monotonic to be made static as planned in Documentation/feature-removal-schedule.txt Signed-off-by: John Stultz Cc: Martin Schwidefsky Cc: Anton Blanchard Cc: Paul Mackerras Cc: Tony Luck LKML-Reference: <1279068988-21864-7-git-send-email-johnstul@us.ibm.com> Signed-off-by: Thomas Gleixner --- kernel/time/timekeeping.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 73edd40..b15c3ac 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -170,7 +170,8 @@ void timekeeping_leap_insert(int leapsecond) { xtime.tv_sec += leapsecond; wall_to_monotonic.tv_sec -= leapsecond; - update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); + update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock, + timekeeper.mult); } /** @@ -326,7 +327,8 @@ int do_settimeofday(struct timespec *tv) timekeeper.ntp_error = 0; ntp_clear(); - update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); + update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock, + timekeeper.mult); write_sequnlock_irqrestore(&xtime_lock, flags); @@ -809,7 +811,8 @@ void update_wall_time(void) } /* check to see if there is a new clocksource to use */ - update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); + update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock, + timekeeper.mult); } /** -- cgit v1.1 From 8ab4351a4c888016620f43bde605b3d0964af339 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 13 Jul 2010 17:56:25 -0700 Subject: hrtimer: Cleanup direct access to wall_to_monotonic Provides an accessor function to replace hrtimer.c's direct access of wall_to_monotonic. 
This will allow wall_to_monotonic to be made static as planned in Documentation/feature-removal-schedule.txt Signed-off-by: John Stultz LKML-Reference: <1279068988-21864-9-git-send-email-johnstul@us.ibm.com> Signed-off-by: Thomas Gleixner --- kernel/hrtimer.c | 9 ++++----- kernel/time/timekeeping.c | 5 +++++ 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 5c69e99..809f48c 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -90,7 +90,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) do { seq = read_seqbegin(&xtime_lock); xts = __current_kernel_time(); - tom = wall_to_monotonic; + tom = __get_wall_to_monotonic(); } while (read_seqretry(&xtime_lock, seq)); xtim = timespec_to_ktime(xts); @@ -612,7 +612,7 @@ static int hrtimer_reprogram(struct hrtimer *timer, static void retrigger_next_event(void *arg) { struct hrtimer_cpu_base *base; - struct timespec realtime_offset; + struct timespec realtime_offset, wtm; unsigned long seq; if (!hrtimer_hres_active()) @@ -620,10 +620,9 @@ static void retrigger_next_event(void *arg) do { seq = read_seqbegin(&xtime_lock); - set_normalized_timespec(&realtime_offset, - -wall_to_monotonic.tv_sec, - -wall_to_monotonic.tv_nsec); + wtm = __get_wall_to_monotonic(); } while (read_seqretry(&xtime_lock, seq)); + set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec); base = &__get_cpu_var(hrtimer_bases); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index b15c3ac..fb61c2e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -858,6 +858,11 @@ struct timespec __current_kernel_time(void) return xtime; } +struct timespec __get_wall_to_monotonic(void) +{ + return wall_to_monotonic; +} + struct timespec current_kernel_time(void) { struct timespec now; -- cgit v1.1 From 0fb86b06298b6cd3205cac2e68a499f269282dac Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 13 Jul 2010 17:56:26 -0700 Subject: timekeeping: Make xtime and wall_to_monotonic static This patch makes xtime and wall_to_monotonic static, as planned in Documentation/feature-removal-schedule.txt. This will allow for further cleanups to the timekeeping core. Signed-off-by: John Stultz LKML-Reference: <1279068988-21864-10-git-send-email-johnstul@us.ibm.com> Signed-off-by: Thomas Gleixner --- kernel/time/timekeeping.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index fb61c2e..e14c839 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -153,8 +153,8 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); * - wall_to_monotonic is no longer the boot time, getboottime must be * used instead. */ -struct timespec xtime __attribute__ ((aligned (16))); -struct timespec wall_to_monotonic __attribute__ ((aligned (16))); +static struct timespec xtime __attribute__ ((aligned (16))); +static struct timespec wall_to_monotonic __attribute__ ((aligned (16))); static struct timespec total_sleep_time; /* -- cgit v1.1 From 852db46d55e85b475a72e665ca08d3317769ceef Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 13 Jul 2010 17:56:28 -0700 Subject: clocksource: Add __clocksource_updatefreq_hz/khz methods To properly handle clocksources that change frequencies at the clocksource->enable() point, this patch adds a method that will update the clocksource's mult/shift and max_idle_ns values. 
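A hedged sketch of the intended call site follows. The hz wrapper name is taken from the subject line (only the _scale variant appears in the hunk below), and the clk handle plus all example_* names are invented.

#include <linux/clk.h>
#include <linux/clocksource.h>

static struct clk *example_clk;		/* obtained elsewhere via clk_get() */

/* wired up as example_cs.enable; the rate is only known once the clock runs */
static int example_cs_enable(struct clocksource *cs)
{
	int ret;

	ret = clk_enable(example_clk);
	if (ret)
		return ret;

	/* recompute mult/shift and max_idle_ns for the now-known rate */
	__clocksource_updatefreq_hz(cs, clk_get_rate(example_clk));
	return 0;
}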
Signed-off-by: John Stultz LKML-Reference: <1279068988-21864-12-git-send-email-johnstul@us.ibm.com> Signed-off-by: Thomas Gleixner --- kernel/time/clocksource.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index c543d21..c18d7ef 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -639,19 +639,18 @@ static void clocksource_enqueue(struct clocksource *cs) #define MAX_UPDATE_LENGTH 5 /* Seconds */ /** - * __clocksource_register_scale - Used to install new clocksources + * __clocksource_updatefreq_scale - Used update clocksource with new freq * @t: clocksource to be registered * @scale: Scale factor multiplied against freq to get clocksource hz * @freq: clocksource frequency (cycles per second) divided by scale * - * Returns -EBUSY if registration fails, zero otherwise. + * This should only be called from the clocksource->enable() method. * * This *SHOULD NOT* be called directly! Please use the - * clocksource_register_hz() or clocksource_register_khz helper functions. + * clocksource_updatefreq_hz() or clocksource_updatefreq_khz helper functions. */ -int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) +void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) { - /* * Ideally we want to use some of the limits used in * clocksource_max_deferment, to provide a more informed @@ -662,7 +661,27 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) NSEC_PER_SEC/scale, MAX_UPDATE_LENGTH*scale); cs->max_idle_ns = clocksource_max_deferment(cs); +} +EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); + +/** + * __clocksource_register_scale - Used to install new clocksources + * @t: clocksource to be registered + * @scale: Scale factor multiplied against freq to get clocksource hz + * @freq: clocksource frequency (cycles per second) divided by scale + * + * Returns -EBUSY if registration fails, zero otherwise. + * + * This *SHOULD NOT* be called directly! Please use the + * clocksource_register_hz() or clocksource_register_khz helper functions. + */ +int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) +{ + + /* Intialize mult/shift and max_idle_ns */ + __clocksource_updatefreq_scale(cs, scale, freq); + /* Add clocksource to the clcoksource list */ mutex_lock(&clocksource_mutex); clocksource_enqueue(cs); clocksource_select(); -- cgit v1.1 From d7926ee38f5c6e0bbebe712304f99a4c67e40f84 Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Sun, 30 May 2010 22:24:39 +0200 Subject: cgroups: Add an API to attach a task to current task's cgroup Add a new kernel API to attach a task to current task's cgroup in all the active hierarchies. Signed-off-by: Sridhar Samudrala Signed-off-by: Michael S. 
Tsirkin Reviewed-by: Paul Menage Acked-by: Li Zefan --- kernel/cgroup.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 422cb19..37642ad 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1788,6 +1788,29 @@ out: return retval; } +/** + * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup + * @tsk: the task to be attached + */ +int cgroup_attach_task_current_cg(struct task_struct *tsk) +{ + struct cgroupfs_root *root; + struct cgroup *cur_cg; + int retval = 0; + + cgroup_lock(); + for_each_active_root(root) { + cur_cg = task_cgroup_from_root(current, root); + retval = cgroup_attach_task(cur_cg, tsk); + if (retval) + break; + } + cgroup_unlock(); + + return retval; +} +EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg); + /* * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex * held. May take task_lock of task -- cgit v1.1 From ae7b8f4108bcffb42173f867ce845268c7202d48 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 20:12:04 -0500 Subject: Audit: clean up the audit_watch split No real changes, just cleanup to the audit_watch split patch which we done with minimal code changes for easy review. Now fix interfaces to make things work better. Signed-off-by: Eric Paris --- kernel/audit.c | 1 - kernel/audit.h | 13 ++++------ kernel/audit_watch.c | 67 ++++++++++++++++++++++++++++++---------------------- kernel/auditfilter.c | 41 ++++++++++++-------------------- kernel/auditsc.c | 5 ++-- 5 files changed, 60 insertions(+), 67 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index c71bd26..05a32f0 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -56,7 +56,6 @@ #include #include #include -#include #include #include diff --git a/kernel/audit.h b/kernel/audit.h index 208687b..82c8a09 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -104,20 +104,15 @@ extern void audit_free_rule_rcu(struct rcu_head *); extern struct list_head audit_filter_list[]; /* audit watch functions */ -extern unsigned long audit_watch_inode(struct audit_watch *watch); -extern dev_t audit_watch_dev(struct audit_watch *watch); extern void audit_put_watch(struct audit_watch *watch); extern void audit_get_watch(struct audit_watch *watch); extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op); -extern int audit_add_watch(struct audit_krule *krule); -extern void audit_remove_watch(struct audit_watch *watch); +extern int audit_add_watch(struct audit_krule *krule, struct list_head **list); extern void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list); -extern void audit_inotify_unregister(struct list_head *in_list); +extern void audit_watch_inotify_unregister(struct list_head *in_list); extern char *audit_watch_path(struct audit_watch *watch); -extern struct list_head *audit_watch_rules(struct audit_watch *watch); - -extern struct audit_entry *audit_dupe_rule(struct audit_krule *old, - struct audit_watch *watch); +extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev); +extern struct audit_entry *audit_dupe_rule(struct audit_krule *old); #ifdef CONFIG_AUDIT_TREE extern struct audit_chunk *audit_tree_lookup(const struct inode *); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 8df4369..c2ca716 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -51,12 +51,12 @@ struct audit_watch { unsigned long ino; /* associated inode number */ struct 
audit_parent *parent; /* associated parent */ struct list_head wlist; /* entry in parent->watches list */ - struct list_head rules; /* associated rules */ + struct list_head rules; /* anchor for krule->rlist */ }; struct audit_parent { - struct list_head ilist; /* entry in inotify registration list */ - struct list_head watches; /* associated watches */ + struct list_head ilist; /* tmp list used to free parents */ + struct list_head watches; /* anchor for audit_watch->wlist */ struct inotify_watch wdata; /* inotify watch data */ unsigned flags; /* status flags */ }; @@ -78,13 +78,18 @@ struct inotify_handle *audit_ih; /* Inotify events we care about. */ #define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF -static void audit_free_parent(struct inotify_watch *i_watch) +static void audit_free_parent(struct audit_parent *parent) +{ + WARN_ON(!list_empty(&parent->watches)); + kfree(parent); +} + +static void audit_destroy_watch(struct inotify_watch *i_watch) { struct audit_parent *parent; parent = container_of(i_watch, struct audit_parent, wdata); - WARN_ON(!list_empty(&parent->watches)); - kfree(parent); + audit_free_parent(parent); } void audit_get_watch(struct audit_watch *watch) @@ -115,19 +120,11 @@ char *audit_watch_path(struct audit_watch *watch) return watch->path; } -struct list_head *audit_watch_rules(struct audit_watch *watch) +int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev) { - return &watch->rules; -} - -unsigned long audit_watch_inode(struct audit_watch *watch) -{ - return watch->ino; -} - -dev_t audit_watch_dev(struct audit_watch *watch) -{ - return watch->dev; + return (watch->ino != (unsigned long)-1) && + (watch->ino == ino) && + (watch->dev == dev); } /* Initialize a parent watch entry. */ @@ -149,7 +146,7 @@ static struct audit_parent *audit_init_parent(struct nameidata *ndp) wd = inotify_add_watch(audit_ih, &parent->wdata, ndp->path.dentry->d_inode, AUDIT_IN_WATCH); if (wd < 0) { - audit_free_parent(&parent->wdata); + audit_free_parent(parent); return ERR_PTR(wd); } @@ -251,15 +248,19 @@ static void audit_update_watch(struct audit_parent *parent, struct audit_entry *oentry, *nentry; mutex_lock(&audit_filter_mutex); + /* Run all of the watches on this parent looking for the one that + * matches the given dname */ list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) { if (audit_compare_dname_path(dname, owatch->path, NULL)) continue; /* If the update involves invalidating rules, do the inode-based * filtering now, so we don't omit records. */ - if (invalidating && current->audit_context) + if (invalidating && !audit_dummy_context()) audit_filter_inodes(current, current->audit_context); + /* updating ino will likely change which audit_hash_list we + * are on so we need a new watch for the new list */ nwatch = audit_dupe_watch(owatch); if (IS_ERR(nwatch)) { mutex_unlock(&audit_filter_mutex); @@ -275,12 +276,21 @@ static void audit_update_watch(struct audit_parent *parent, list_del(&oentry->rule.rlist); list_del_rcu(&oentry->list); - nentry = audit_dupe_rule(&oentry->rule, nwatch); + nentry = audit_dupe_rule(&oentry->rule); if (IS_ERR(nentry)) { list_del(&oentry->rule.list); audit_panic("error updating watch, removing"); } else { int h = audit_hash_ino((u32)ino); + + /* + * nentry->rule.watch == oentry->rule.watch so + * we must drop that reference and set it to our + * new watch. 
+ */ + audit_put_watch(nentry->rule.watch); + audit_get_watch(nwatch); + nentry->rule.watch = nwatch; list_add(&nentry->rule.rlist, &nwatch->rules); list_add_rcu(&nentry->list, &audit_inode_hash[h]); list_replace(&oentry->rule.list, @@ -329,14 +339,14 @@ static void audit_remove_parent_watches(struct audit_parent *parent) /* Unregister inotify watches for parents on in_list. * Generates an IN_IGNORED event. */ -void audit_inotify_unregister(struct list_head *in_list) +void audit_watch_inotify_unregister(struct list_head *in_list) { struct audit_parent *p, *n; list_for_each_entry_safe(p, n, in_list, ilist) { list_del(&p->ilist); inotify_rm_watch(audit_ih, &p->wdata); - /* the unpin matching the pin in audit_do_del_rule() */ + /* the unpin matching the pin in audit_remove_watch_rule() */ unpin_inotify_watch(&p->wdata); } } @@ -423,13 +433,13 @@ static void audit_add_to_parent(struct audit_krule *krule, /* Find a matching watch entry, or add this one. * Caller must hold audit_filter_mutex. */ -int audit_add_watch(struct audit_krule *krule) +int audit_add_watch(struct audit_krule *krule, struct list_head **list) { struct audit_watch *watch = krule->watch; struct inotify_watch *i_watch; struct audit_parent *parent; struct nameidata *ndp = NULL, *ndw = NULL; - int ret = 0; + int h, ret = 0; mutex_unlock(&audit_filter_mutex); @@ -475,6 +485,8 @@ int audit_add_watch(struct audit_krule *krule) /* match get in audit_init_parent or inotify_find_watch */ put_inotify_watch(&parent->wdata); + h = audit_hash_ino((u32)watch->ino); + *list = &audit_inode_hash[h]; error: audit_put_nd(ndp, ndw); /* NULL args OK */ return ret; @@ -514,8 +526,7 @@ static void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask, parent = container_of(i_watch, struct audit_parent, wdata); if (mask & (IN_CREATE|IN_MOVED_TO) && inode) - audit_update_watch(parent, dname, inode->i_sb->s_dev, - inode->i_ino, 0); + audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0); else if (mask & (IN_DELETE|IN_MOVED_FROM)) audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1); /* inotify automatically removes the watch and sends IN_IGNORED */ @@ -531,7 +542,7 @@ static void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask, static const struct inotify_operations audit_inotify_ops = { .handle_event = audit_handle_ievent, - .destroy_watch = audit_free_parent, + .destroy_watch = audit_destroy_watch, }; static int __init audit_watch_init(void) diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index ce08041..ac87577 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -71,6 +71,7 @@ static inline void audit_free_rule(struct audit_entry *e) { int i; struct audit_krule *erule = &e->rule; + /* some rules don't have associated watches */ if (erule->watch) audit_put_watch(erule->watch); @@ -746,8 +747,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df, * rule with the new rule in the filterlist, then free the old rule. * The rlist element is undefined; list manipulations are handled apart from * the initial copy. 
*/ -struct audit_entry *audit_dupe_rule(struct audit_krule *old, - struct audit_watch *watch) +struct audit_entry *audit_dupe_rule(struct audit_krule *old) { u32 fcount = old->field_count; struct audit_entry *entry; @@ -769,8 +769,8 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old, new->prio = old->prio; new->buflen = old->buflen; new->inode_f = old->inode_f; - new->watch = NULL; new->field_count = old->field_count; + /* * note that we are OK with not refcounting here; audit_match_tree() * never dereferences tree and we can't get false positives there @@ -811,9 +811,9 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old, } } - if (watch) { - audit_get_watch(watch); - new->watch = watch; + if (old->watch) { + audit_get_watch(old->watch); + new->watch = old->watch; } return entry; @@ -866,7 +866,7 @@ static inline int audit_add_rule(struct audit_entry *entry) struct audit_watch *watch = entry->rule.watch; struct audit_tree *tree = entry->rule.tree; struct list_head *list; - int h, err; + int err; #ifdef CONFIG_AUDITSYSCALL int dont_count = 0; @@ -889,15 +889,11 @@ static inline int audit_add_rule(struct audit_entry *entry) if (watch) { /* audit_filter_mutex is dropped and re-taken during this call */ - err = audit_add_watch(&entry->rule); + err = audit_add_watch(&entry->rule, &list); if (err) { mutex_unlock(&audit_filter_mutex); goto error; } - /* entry->rule.watch may have changed during audit_add_watch() */ - watch = entry->rule.watch; - h = audit_hash_ino((u32)audit_watch_inode(watch)); - list = &audit_inode_hash[h]; } if (tree) { err = audit_add_tree_rule(&entry->rule); @@ -949,7 +945,7 @@ static inline int audit_del_rule(struct audit_entry *entry) struct audit_watch *watch = entry->rule.watch; struct audit_tree *tree = entry->rule.tree; struct list_head *list; - LIST_HEAD(inotify_list); + LIST_HEAD(inotify_unregister_list); int ret = 0; #ifdef CONFIG_AUDITSYSCALL int dont_count = 0; @@ -969,7 +965,7 @@ static inline int audit_del_rule(struct audit_entry *entry) } if (e->rule.watch) - audit_remove_watch_rule(&e->rule, &inotify_list); + audit_remove_watch_rule(&e->rule, &inotify_unregister_list); if (e->rule.tree) audit_remove_tree_rule(&e->rule); @@ -987,8 +983,8 @@ static inline int audit_del_rule(struct audit_entry *entry) #endif mutex_unlock(&audit_filter_mutex); - if (!list_empty(&inotify_list)) - audit_inotify_unregister(&inotify_list); + if (!list_empty(&inotify_unregister_list)) + audit_watch_inotify_unregister(&inotify_unregister_list); out: if (watch) @@ -1323,30 +1319,23 @@ static int update_lsm_rule(struct audit_krule *r) { struct audit_entry *entry = container_of(r, struct audit_entry, rule); struct audit_entry *nentry; - struct audit_watch *watch; - struct audit_tree *tree; int err = 0; if (!security_audit_rule_known(r)) return 0; - watch = r->watch; - tree = r->tree; - nentry = audit_dupe_rule(r, watch); + nentry = audit_dupe_rule(r); if (IS_ERR(nentry)) { /* save the first error encountered for the * return value */ err = PTR_ERR(nentry); audit_panic("error updating LSM filters"); - if (watch) + if (r->watch) list_del(&r->rlist); list_del_rcu(&entry->list); list_del(&r->list); } else { - if (watch) { - list_add(&nentry->rule.rlist, audit_watch_rules(watch)); - list_del(&r->rlist); - } else if (tree) + if (r->watch || r->tree) list_replace_init(&r->rlist, &nentry->rule.rlist); list_replace_rcu(&entry->list, &nentry->list); list_replace(&r->list, &nentry->rule.list); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 3828ad5..240063c 100644 --- 
a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -549,9 +549,8 @@ static int audit_filter_rules(struct task_struct *tsk, } break; case AUDIT_WATCH: - if (name && audit_watch_inode(rule->watch) != (unsigned long)-1) - result = (name->dev == audit_watch_dev(rule->watch) && - name->ino == audit_watch_inode(rule->watch)); + if (name) + result = audit_watch_compare(rule->watch, name->ino, name->dev); break; case AUDIT_DIR: if (ctx) -- cgit v1.1 From e9fd702a58c49dbb14481dca88dad44758da393a Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 20:12:04 -0500 Subject: audit: convert audit watches to use fsnotify instead of inotify Audit currently uses inotify to pin inodes in core and to detect when watched inodes are deleted or unmounted. This patch uses fsnotify instead of inotify. Signed-off-by: Eric Paris --- kernel/audit_watch.c | 208 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 148 insertions(+), 60 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index c2ca716..ff5be84 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -24,18 +24,18 @@ #include #include #include +#include #include #include #include #include -#include #include #include "audit.h" /* * Reference counting: * - * audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED + * audit_parent: lifetime is from audit_init_parent() to receipt of an FS_IGNORED * event. Each audit_watch holds a reference to its associated parent. * * audit_watch: if added to lists, lifetime is from audit_init_watch() to @@ -57,26 +57,27 @@ struct audit_watch { struct audit_parent { struct list_head ilist; /* tmp list used to free parents */ struct list_head watches; /* anchor for audit_watch->wlist */ - struct inotify_watch wdata; /* inotify watch data */ + struct fsnotify_mark_entry mark; /* fsnotify mark on the inode */ unsigned flags; /* status flags */ }; -/* Inotify handle. */ -struct inotify_handle *audit_ih; +/* fsnotify handle. */ +struct fsnotify_group *audit_watch_group; /* * audit_parent status flags: * * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to * a filesystem event to ensure we're adding audit watches to a valid parent. - * Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot - * receive them while we have nameidata, but must be used for IN_MOVE_SELF which + * Technically not needed for FS_DELETE_SELF or FS_UNMOUNT events, as we cannot + * receive them while we have nameidata, but must be used for FS_MOVE_SELF which * we can receive while holding nameidata. */ #define AUDIT_PARENT_INVALID 0x001 -/* Inotify events we care about. */ -#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF +/* fsnotify events we care about. 
*/ +#define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\ + FS_MOVE_SELF | FS_EVENT_ON_CHILD) static void audit_free_parent(struct audit_parent *parent) { @@ -84,14 +85,45 @@ static void audit_free_parent(struct audit_parent *parent) kfree(parent); } -static void audit_destroy_watch(struct inotify_watch *i_watch) +static void audit_watch_free_mark(struct fsnotify_mark_entry *entry) { struct audit_parent *parent; - parent = container_of(i_watch, struct audit_parent, wdata); + parent = container_of(entry, struct audit_parent, mark); audit_free_parent(parent); } +static void audit_get_parent(struct audit_parent *parent) +{ + if (likely(parent)) + fsnotify_get_mark(&parent->mark); +} + +static void audit_put_parent(struct audit_parent *parent) +{ + if (likely(parent)) + fsnotify_put_mark(&parent->mark); +} + +/* + * Find and return the audit_parent on the given inode. If found a reference + * is taken on this parent. + */ +static inline struct audit_parent *audit_find_parent(struct inode *inode) +{ + struct audit_parent *parent = NULL; + struct fsnotify_mark_entry *entry; + + spin_lock(&inode->i_lock); + entry = fsnotify_find_mark_entry(audit_watch_group, inode); + spin_unlock(&inode->i_lock); + + if (entry) + parent = container_of(entry, struct audit_parent, mark); + + return parent; +} + void audit_get_watch(struct audit_watch *watch) { atomic_inc(&watch->count); @@ -110,7 +142,7 @@ void audit_put_watch(struct audit_watch *watch) void audit_remove_watch(struct audit_watch *watch) { list_del(&watch->wlist); - put_inotify_watch(&watch->parent->wdata); + audit_put_parent(watch->parent); watch->parent = NULL; audit_put_watch(watch); /* match initial get */ } @@ -130,8 +162,9 @@ int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev) /* Initialize a parent watch entry. 
*/ static struct audit_parent *audit_init_parent(struct nameidata *ndp) { + struct inode *inode = ndp->path.dentry->d_inode; struct audit_parent *parent; - s32 wd; + int ret; parent = kzalloc(sizeof(*parent), GFP_KERNEL); if (unlikely(!parent)) @@ -140,14 +173,14 @@ static struct audit_parent *audit_init_parent(struct nameidata *ndp) INIT_LIST_HEAD(&parent->watches); parent->flags = 0; - inotify_init_watch(&parent->wdata); - /* grab a ref so inotify watch hangs around until we take audit_filter_mutex */ - get_inotify_watch(&parent->wdata); - wd = inotify_add_watch(audit_ih, &parent->wdata, - ndp->path.dentry->d_inode, AUDIT_IN_WATCH); - if (wd < 0) { + fsnotify_init_mark(&parent->mark, audit_watch_free_mark); + parent->mark.mask = AUDIT_FS_WATCH; + /* grab a ref so fsnotify mark hangs around until we take audit_filter_mutex */ + audit_get_parent(parent); + ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode); + if (ret < 0) { audit_free_parent(parent); - return ERR_PTR(wd); + return ERR_PTR(ret); } return parent; @@ -176,7 +209,7 @@ int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op) { struct audit_watch *watch; - if (!audit_ih) + if (!audit_watch_group) return -EOPNOTSUPP; if (path[0] != '/' || path[len-1] == '/' || @@ -214,7 +247,7 @@ static struct audit_watch *audit_dupe_watch(struct audit_watch *old) new->dev = old->dev; new->ino = old->ino; - get_inotify_watch(&old->parent->wdata); + audit_get_parent(old->parent); new->parent = old->parent; out: @@ -335,19 +368,21 @@ static void audit_remove_parent_watches(struct audit_parent *parent) audit_remove_watch(w); } mutex_unlock(&audit_filter_mutex); + + fsnotify_destroy_mark_by_entry(&parent->mark); } /* Unregister inotify watches for parents on in_list. - * Generates an IN_IGNORED event. */ + * Generates an FS_IGNORED event. */ void audit_watch_inotify_unregister(struct list_head *in_list) { struct audit_parent *p, *n; list_for_each_entry_safe(p, n, in_list, ilist) { list_del(&p->ilist); - inotify_rm_watch(audit_ih, &p->wdata); - /* the unpin matching the pin in audit_remove_watch_rule() */ - unpin_inotify_watch(&p->wdata); + fsnotify_destroy_mark_by_entry(&p->mark); + /* matches the get in audit_remove_watch_rule() */ + audit_put_parent(p); } } @@ -399,7 +434,7 @@ static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw) } } -/* Associate the given rule with an existing parent inotify_watch. +/* Associate the given rule with an existing parent. * Caller must hold audit_filter_mutex. 
*/ static void audit_add_to_parent(struct audit_krule *krule, struct audit_parent *parent) @@ -407,6 +442,8 @@ static void audit_add_to_parent(struct audit_krule *krule, struct audit_watch *w, *watch = krule->watch; int watch_found = 0; + BUG_ON(!mutex_is_locked(&audit_filter_mutex)); + list_for_each_entry(w, &parent->watches, wlist) { if (strcmp(watch->path, w->path)) continue; @@ -423,7 +460,7 @@ static void audit_add_to_parent(struct audit_krule *krule, } if (!watch_found) { - get_inotify_watch(&parent->wdata); + audit_get_parent(parent); watch->parent = parent; list_add(&watch->wlist, &parent->watches); @@ -436,7 +473,6 @@ static void audit_add_to_parent(struct audit_krule *krule, int audit_add_watch(struct audit_krule *krule, struct list_head **list) { struct audit_watch *watch = krule->watch; - struct inotify_watch *i_watch; struct audit_parent *parent; struct nameidata *ndp = NULL, *ndw = NULL; int h, ret = 0; @@ -462,8 +498,8 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) * inotify watch is found, inotify_find_watch() grabs a reference before * returning. */ - if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode, - &i_watch) < 0) { + parent = audit_find_parent(ndp->path.dentry->d_inode); + if (!parent) { parent = audit_init_parent(ndp); if (IS_ERR(parent)) { /* caller expects mutex locked */ @@ -471,8 +507,7 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) ret = PTR_ERR(parent); goto error; } - } else - parent = container_of(i_watch, struct audit_parent, wdata); + } mutex_lock(&audit_filter_mutex); @@ -482,8 +517,8 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) else audit_add_to_parent(krule, parent); - /* match get in audit_init_parent or inotify_find_watch */ - put_inotify_watch(&parent->wdata); + /* match get in audit_find_parent or audit_init_parent */ + audit_put_parent(parent); h = audit_hash_ino((u32)watch->ino); *list = &audit_inode_hash[h]; @@ -504,52 +539,105 @@ void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list) audit_remove_watch(watch); if (list_empty(&parent->watches)) { - /* Put parent on the inotify un-registration - * list. Grab a reference before releasing + /* Put parent on the un-registration list. + * Grab a reference before releasing * audit_filter_mutex, to be released in - * audit_inotify_unregister(). + * audit_watch_inotify_unregister(). * If filesystem is going away, just leave * the sucker alone, eviction will take * care of it. */ - if (pin_inotify_watch(&parent->wdata)) - list_add(&parent->ilist, list); + audit_get_parent(parent); + list_add(&parent->ilist, list); } } } -/* Update watch data in audit rules based on inotify events. */ -static void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask, - u32 cookie, const char *dname, struct inode *inode) +static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask) { + struct fsnotify_mark_entry *entry; + bool send; + + spin_lock(&inode->i_lock); + entry = fsnotify_find_mark_entry(group, inode); + spin_unlock(&inode->i_lock); + if (!entry) + return false; + + mask = (mask & ~FS_EVENT_ON_CHILD); + send = (entry->mask & mask); + + /* find took a reference */ + fsnotify_put_mark(entry); + + return send; +} + +/* Update watch data in audit rules based on fsnotify events. 
*/ +static int audit_watch_handle_event(struct fsnotify_group *group, struct fsnotify_event *event) +{ + struct inode *inode; + __u32 mask = event->mask; + const char *dname = event->file_name; struct audit_parent *parent; - parent = container_of(i_watch, struct audit_parent, wdata); + BUG_ON(group != audit_watch_group); + + parent = audit_find_parent(event->to_tell); + if (unlikely(!parent)) + return 0; + + switch (event->data_type) { + case (FSNOTIFY_EVENT_PATH): + inode = event->path.dentry->d_inode; + break; + case (FSNOTIFY_EVENT_INODE): + inode = event->inode; + break; + default: + BUG(); + inode = NULL; + break; + }; - if (mask & (IN_CREATE|IN_MOVED_TO) && inode) + if (mask & (FS_CREATE|FS_MOVED_TO) && inode) audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0); - else if (mask & (IN_DELETE|IN_MOVED_FROM)) + else if (mask & (FS_DELETE|FS_MOVED_FROM)) audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1); - /* inotify automatically removes the watch and sends IN_IGNORED */ - else if (mask & (IN_DELETE_SELF|IN_UNMOUNT)) + else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF)) audit_remove_parent_watches(parent); - /* inotify does not remove the watch, so remove it manually */ - else if(mask & IN_MOVE_SELF) { - audit_remove_parent_watches(parent); - inotify_remove_watch_locked(audit_ih, i_watch); - } else if (mask & IN_IGNORED) - put_inotify_watch(i_watch); + /* moved put_inotify_watch to freeing mark */ + + /* matched the ref taken by audit_find_parent */ + audit_put_parent(parent); + + return 0; +} + +static void audit_watch_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group) +{ + struct audit_parent *parent; + + parent = container_of(entry, struct audit_parent, mark); + /* taken from audit_handle_ievent & FS_IGNORED please figure out what I match... */ + audit_put_parent(parent); } -static const struct inotify_operations audit_inotify_ops = { - .handle_event = audit_handle_ievent, - .destroy_watch = audit_destroy_watch, +static const struct fsnotify_ops audit_watch_fsnotify_ops = { + .should_send_event = audit_watch_should_send_event, + .handle_event = audit_watch_handle_event, + .free_group_priv = NULL, + .freeing_mark = audit_watch_freeing_mark, + .free_event_priv = NULL, }; static int __init audit_watch_init(void) { - audit_ih = inotify_init(&audit_inotify_ops); - if (IS_ERR(audit_ih)) - audit_panic("cannot initialize inotify handle"); + audit_watch_group = fsnotify_obtain_group(AUDIT_WATCH_GROUP_NUM, AUDIT_FS_WATCH, + &audit_watch_fsnotify_ops); + if (IS_ERR(audit_watch_group)) { + audit_watch_group = NULL; + audit_panic("cannot create audit fsnotify group"); + } return 0; } subsys_initcall(audit_watch_init); -- cgit v1.1 From e118e9c5638bbe877aa26b5cd2fd223cc24cdc8a Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 20:12:04 -0500 Subject: audit: redo audit watch locking and refcnt in light of fsnotify fsnotify can handle mutexes to be held across all fsnotify operations since it deals strickly in spinlocks. This can simplify and reduce some of the audit_filter_mutex taking and dropping. 
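Concretely, the point is that a sequence like the sketch below is now legal while holding audit_filter_mutex, because the fsnotify calls only take spinlocks internally. The helper name is invented; the calls are the ones used in the surrounding hunks, written as if inside kernel/audit_watch.c.

/* illustrative helper, not part of the patch */
static void example_drop_idle_parent(struct audit_parent *parent)
{
	mutex_lock(&audit_filter_mutex);
	if (list_empty(&parent->watches)) {
		audit_get_parent(parent);	/* keep it across the destroy */
		fsnotify_destroy_mark_by_entry(&parent->mark);
		audit_put_parent(parent);	/* fsnotify_put_mark() inside */
	}
	mutex_unlock(&audit_filter_mutex);
}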
Signed-off-by: Eric Paris --- kernel/audit_watch.c | 45 +++++---------------------------------------- 1 file changed, 5 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index ff5be84..da66197 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -58,23 +58,11 @@ struct audit_parent { struct list_head ilist; /* tmp list used to free parents */ struct list_head watches; /* anchor for audit_watch->wlist */ struct fsnotify_mark_entry mark; /* fsnotify mark on the inode */ - unsigned flags; /* status flags */ }; /* fsnotify handle. */ struct fsnotify_group *audit_watch_group; -/* - * audit_parent status flags: - * - * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to - * a filesystem event to ensure we're adding audit watches to a valid parent. - * Technically not needed for FS_DELETE_SELF or FS_UNMOUNT events, as we cannot - * receive them while we have nameidata, but must be used for FS_MOVE_SELF which - * we can receive while holding nameidata. - */ -#define AUDIT_PARENT_INVALID 0x001 - /* fsnotify events we care about. */ #define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\ FS_MOVE_SELF | FS_EVENT_ON_CHILD) @@ -171,12 +159,9 @@ static struct audit_parent *audit_init_parent(struct nameidata *ndp) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&parent->watches); - parent->flags = 0; fsnotify_init_mark(&parent->mark, audit_watch_free_mark); parent->mark.mask = AUDIT_FS_WATCH; - /* grab a ref so fsnotify mark hangs around until we take audit_filter_mutex */ - audit_get_parent(parent); ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode); if (ret < 0) { audit_free_parent(parent); @@ -355,7 +340,6 @@ static void audit_remove_parent_watches(struct audit_parent *parent) struct audit_entry *e; mutex_lock(&audit_filter_mutex); - parent->flags |= AUDIT_PARENT_INVALID; list_for_each_entry_safe(w, nextw, &parent->watches, wlist) { list_for_each_entry_safe(r, nextr, &w->rules, rlist) { e = container_of(r, struct audit_entry, rule); @@ -487,35 +471,25 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) goto error; } + mutex_lock(&audit_filter_mutex); + /* update watch filter fields */ if (ndw) { watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev; watch->ino = ndw->path.dentry->d_inode->i_ino; } - /* The audit_filter_mutex must not be held during inotify calls because - * we hold it during inotify event callback processing. If an existing - * inotify watch is found, inotify_find_watch() grabs a reference before - * returning. 
- */ + /* either find an old parent or attach a new one */ parent = audit_find_parent(ndp->path.dentry->d_inode); if (!parent) { parent = audit_init_parent(ndp); if (IS_ERR(parent)) { - /* caller expects mutex locked */ - mutex_lock(&audit_filter_mutex); ret = PTR_ERR(parent); goto error; } } - mutex_lock(&audit_filter_mutex); - - /* parent was moved before we took audit_filter_mutex */ - if (parent->flags & AUDIT_PARENT_INVALID) - ret = -ENOENT; - else - audit_add_to_parent(krule, parent); + audit_add_to_parent(krule, parent); /* match get in audit_find_parent or audit_init_parent */ audit_put_parent(parent); @@ -613,20 +587,11 @@ static int audit_watch_handle_event(struct fsnotify_group *group, struct fsnotif return 0; } -static void audit_watch_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group) -{ - struct audit_parent *parent; - - parent = container_of(entry, struct audit_parent, mark); - /* taken from audit_handle_ievent & FS_IGNORED please figure out what I match... */ - audit_put_parent(parent); -} - static const struct fsnotify_ops audit_watch_fsnotify_ops = { .should_send_event = audit_watch_should_send_event, .handle_event = audit_watch_handle_event, .free_group_priv = NULL, - .freeing_mark = audit_watch_freeing_mark, + .freeing_mark = NULL, .free_event_priv = NULL, }; -- cgit v1.1 From a05fb6cc573130915380e00d182a4c6571cec6b2 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 20:12:05 -0500 Subject: audit: do not get and put just to free a watch deleting audit watch rules is not currently done under audit_filter_mutex. It was done this way because we could not hold the mutex during inotify manipulation. Since we are using fsnotify we don't need to do the extra get/put pair nor do we need the private list on which to store the parents while they are about to be freed. Signed-off-by: Eric Paris --- kernel/audit.h | 3 +-- kernel/audit_watch.c | 27 +++------------------------ kernel/auditfilter.c | 6 +----- 3 files changed, 5 insertions(+), 31 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.h b/kernel/audit.h index 82c8a09..100b454 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -108,8 +108,7 @@ extern void audit_put_watch(struct audit_watch *watch); extern void audit_get_watch(struct audit_watch *watch); extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op); extern int audit_add_watch(struct audit_krule *krule, struct list_head **list); -extern void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list); -extern void audit_watch_inotify_unregister(struct list_head *in_list); +extern void audit_remove_watch_rule(struct audit_krule *krule); extern char *audit_watch_path(struct audit_watch *watch); extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev); extern struct audit_entry *audit_dupe_rule(struct audit_krule *old); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index da66197..75ab539 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -55,7 +55,6 @@ struct audit_watch { }; struct audit_parent { - struct list_head ilist; /* tmp list used to free parents */ struct list_head watches; /* anchor for audit_watch->wlist */ struct fsnotify_mark_entry mark; /* fsnotify mark on the inode */ }; @@ -356,20 +355,6 @@ static void audit_remove_parent_watches(struct audit_parent *parent) fsnotify_destroy_mark_by_entry(&parent->mark); } -/* Unregister inotify watches for parents on in_list. - * Generates an FS_IGNORED event. 
*/ -void audit_watch_inotify_unregister(struct list_head *in_list) -{ - struct audit_parent *p, *n; - - list_for_each_entry_safe(p, n, in_list, ilist) { - list_del(&p->ilist); - fsnotify_destroy_mark_by_entry(&p->mark); - /* matches the get in audit_remove_watch_rule() */ - audit_put_parent(p); - } -} - /* Get path information necessary for adding watches. */ static int audit_get_nd(char *path, struct nameidata **ndp, struct nameidata **ndw) { @@ -502,7 +487,7 @@ error: } -void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list) +void audit_remove_watch_rule(struct audit_krule *krule) { struct audit_watch *watch = krule->watch; struct audit_parent *parent = watch->parent; @@ -513,15 +498,9 @@ void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list) audit_remove_watch(watch); if (list_empty(&parent->watches)) { - /* Put parent on the un-registration list. - * Grab a reference before releasing - * audit_filter_mutex, to be released in - * audit_watch_inotify_unregister(). - * If filesystem is going away, just leave - * the sucker alone, eviction will take - * care of it. */ audit_get_parent(parent); - list_add(&parent->ilist, list); + fsnotify_destroy_mark_by_entry(&parent->mark); + audit_put_parent(parent); } } } diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index ac87577..eb76754 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -945,7 +945,6 @@ static inline int audit_del_rule(struct audit_entry *entry) struct audit_watch *watch = entry->rule.watch; struct audit_tree *tree = entry->rule.tree; struct list_head *list; - LIST_HEAD(inotify_unregister_list); int ret = 0; #ifdef CONFIG_AUDITSYSCALL int dont_count = 0; @@ -965,7 +964,7 @@ static inline int audit_del_rule(struct audit_entry *entry) } if (e->rule.watch) - audit_remove_watch_rule(&e->rule, &inotify_unregister_list); + audit_remove_watch_rule(&e->rule); if (e->rule.tree) audit_remove_tree_rule(&e->rule); @@ -983,9 +982,6 @@ static inline int audit_del_rule(struct audit_entry *entry) #endif mutex_unlock(&audit_filter_mutex); - if (!list_empty(&inotify_unregister_list)) - audit_watch_inotify_unregister(&inotify_unregister_list); - out: if (watch) audit_put_watch(watch); /* match initial get */ -- cgit v1.1 From 40554c3dae83bd892b7fbfaa2ea9de739cbcf065 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 20:12:05 -0500 Subject: fsnotify: allow addition of duplicate fsnotify marks This patch allows a task to add a second fsnotify mark to an inode for the same group. This mark will be added to the end of the inode's list and this will never be found by the stand fsnotify_find_mark() function. This is useful if a user wants to add a new mark before removing the old one. 
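A sketch of the replace-before-remove pattern this enables, as the audit_tree conversion later in this series uses it when resizing a chunk (the trailing 1 is the new allow-duplicates flag; fallback handling omitted):

        fsnotify_duplicate_mark(&new->mark, old_entry);        /* same group and inode */
        if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, 1))
                goto Fallback;
        /* ... migrate owners/trees from the old chunk to the new one ... */
        fsnotify_destroy_mark_by_entry(old_entry);             /* only now tear down the old mark */
        fsnotify_put_mark(old_entry);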
Signed-off-by: Eric Paris --- kernel/audit_watch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 75ab539..c44de0c 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -161,7 +161,7 @@ static struct audit_parent *audit_init_parent(struct nameidata *ndp) fsnotify_init_mark(&parent->mark, audit_watch_free_mark); parent->mark.mask = AUDIT_FS_WATCH; - ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode); + ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode, 0); if (ret < 0) { audit_free_parent(parent); return ERR_PTR(ret); -- cgit v1.1 From 28a3a7eb3b1f3e7d834e19f06e794e429058a4dd Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 20:12:05 -0500 Subject: audit: reimplement audit_trees using fsnotify rather than inotify Simply switch audit_trees from using inotify to using fsnotify for it's inode pinning and disappearing act information. Signed-off-by: Eric Paris --- kernel/audit_tree.c | 234 +++++++++++++++++++++++++++++----------------------- kernel/auditsc.c | 4 +- 2 files changed, 132 insertions(+), 106 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 46a57b5..a164600 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -1,5 +1,5 @@ #include "audit.h" -#include +#include #include #include #include @@ -22,7 +22,7 @@ struct audit_tree { struct audit_chunk { struct list_head hash; - struct inotify_watch watch; + struct fsnotify_mark_entry mark; struct list_head trees; /* with root here */ int dead; int count; @@ -59,7 +59,7 @@ static LIST_HEAD(prune_list); * tree is refcounted; one reference for "some rules on rules_list refer to * it", one for each chunk with pointer to it. * - * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount + * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount * of watch contributes 1 to .refs). * * node.index allows to get from node.list to containing chunk. @@ -68,7 +68,7 @@ static LIST_HEAD(prune_list); * that makes a difference. Some. 
*/ -static struct inotify_handle *rtree_ih; +static struct fsnotify_group *audit_tree_group; static struct audit_tree *alloc_tree(const char *s) { @@ -111,29 +111,6 @@ const char *audit_tree_path(struct audit_tree *tree) return tree->pathname; } -static struct audit_chunk *alloc_chunk(int count) -{ - struct audit_chunk *chunk; - size_t size; - int i; - - size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node); - chunk = kzalloc(size, GFP_KERNEL); - if (!chunk) - return NULL; - - INIT_LIST_HEAD(&chunk->hash); - INIT_LIST_HEAD(&chunk->trees); - chunk->count = count; - atomic_long_set(&chunk->refs, 1); - for (i = 0; i < count; i++) { - INIT_LIST_HEAD(&chunk->owners[i].list); - chunk->owners[i].index = i; - } - inotify_init_watch(&chunk->watch); - return chunk; -} - static void free_chunk(struct audit_chunk *chunk) { int i; @@ -157,6 +134,35 @@ static void __put_chunk(struct rcu_head *rcu) audit_put_chunk(chunk); } +static void audit_tree_destroy_watch(struct fsnotify_mark_entry *entry) +{ + struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); + call_rcu(&chunk->head, __put_chunk); +} + +static struct audit_chunk *alloc_chunk(int count) +{ + struct audit_chunk *chunk; + size_t size; + int i; + + size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node); + chunk = kzalloc(size, GFP_KERNEL); + if (!chunk) + return NULL; + + INIT_LIST_HEAD(&chunk->hash); + INIT_LIST_HEAD(&chunk->trees); + chunk->count = count; + atomic_long_set(&chunk->refs, 1); + for (i = 0; i < count; i++) { + INIT_LIST_HEAD(&chunk->owners[i].list); + chunk->owners[i].index = i; + } + fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch); + return chunk; +} + enum {HASH_SIZE = 128}; static struct list_head chunk_hash_heads[HASH_SIZE]; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock); @@ -167,10 +173,15 @@ static inline struct list_head *chunk_hash(const struct inode *inode) return chunk_hash_heads + n % HASH_SIZE; } -/* hash_lock is held by caller */ +/* hash_lock & entry->lock is held by caller */ static void insert_hash(struct audit_chunk *chunk) { - struct list_head *list = chunk_hash(chunk->watch.inode); + struct fsnotify_mark_entry *entry = &chunk->mark; + struct list_head *list; + + if (!entry->inode) + return; + list = chunk_hash(entry->inode); list_add_rcu(&chunk->hash, list); } @@ -181,7 +192,8 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode) struct audit_chunk *p; list_for_each_entry_rcu(p, list, hash) { - if (p->watch.inode == inode) { + /* mark.inode may have gone NULL, but who cares? */ + if (p->mark.inode == inode) { atomic_long_inc(&p->refs); return p; } @@ -210,38 +222,19 @@ static struct audit_chunk *find_chunk(struct node *p) static void untag_chunk(struct node *p) { struct audit_chunk *chunk = find_chunk(p); + struct fsnotify_mark_entry *entry = &chunk->mark; struct audit_chunk *new; struct audit_tree *owner; int size = chunk->count - 1; int i, j; - if (!pin_inotify_watch(&chunk->watch)) { - /* - * Filesystem is shutting down; all watches are getting - * evicted, just take it off the node list for this - * tree and let the eviction logics take care of the - * rest. - */ - owner = p->owner; - if (owner->root == chunk) { - list_del_init(&owner->same_root); - owner->root = NULL; - } - list_del_init(&p->list); - p->owner = NULL; - put_tree(owner); - return; - } + fsnotify_get_mark(entry); spin_unlock(&hash_lock); - /* - * pin_inotify_watch() succeeded, so the watch won't go away - * from under us. 
- */ - mutex_lock(&chunk->watch.inode->inotify_mutex); - if (chunk->dead) { - mutex_unlock(&chunk->watch.inode->inotify_mutex); + spin_lock(&entry->lock); + if (chunk->dead || !entry->inode) { + spin_unlock(&entry->lock); goto out; } @@ -256,16 +249,17 @@ static void untag_chunk(struct node *p) list_del_init(&p->list); list_del_rcu(&chunk->hash); spin_unlock(&hash_lock); - inotify_evict_watch(&chunk->watch); - mutex_unlock(&chunk->watch.inode->inotify_mutex); - put_inotify_watch(&chunk->watch); + spin_unlock(&entry->lock); + fsnotify_destroy_mark_by_entry(entry); + fsnotify_put_mark(entry); goto out; } new = alloc_chunk(size); if (!new) goto Fallback; - if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) { + fsnotify_duplicate_mark(&new->mark, entry); + if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, 1)) { free_chunk(new); goto Fallback; } @@ -298,9 +292,9 @@ static void untag_chunk(struct node *p) list_for_each_entry(owner, &new->trees, same_root) owner->root = new; spin_unlock(&hash_lock); - inotify_evict_watch(&chunk->watch); - mutex_unlock(&chunk->watch.inode->inotify_mutex); - put_inotify_watch(&chunk->watch); + spin_unlock(&entry->lock); + fsnotify_destroy_mark_by_entry(entry); + fsnotify_put_mark(entry); goto out; Fallback: @@ -314,31 +308,33 @@ Fallback: p->owner = NULL; put_tree(owner); spin_unlock(&hash_lock); - mutex_unlock(&chunk->watch.inode->inotify_mutex); + spin_unlock(&entry->lock); out: - unpin_inotify_watch(&chunk->watch); + fsnotify_put_mark(entry); spin_lock(&hash_lock); } static int create_chunk(struct inode *inode, struct audit_tree *tree) { + struct fsnotify_mark_entry *entry; struct audit_chunk *chunk = alloc_chunk(1); if (!chunk) return -ENOMEM; - if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) { + entry = &chunk->mark; + if (fsnotify_add_mark(entry, audit_tree_group, inode, 0)) { free_chunk(chunk); return -ENOSPC; } - mutex_lock(&inode->inotify_mutex); + spin_lock(&entry->lock); spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; - inotify_evict_watch(&chunk->watch); - mutex_unlock(&inode->inotify_mutex); - put_inotify_watch(&chunk->watch); + spin_unlock(&entry->lock); + fsnotify_destroy_mark_by_entry(entry); + fsnotify_put_mark(entry); return 0; } chunk->owners[0].index = (1U << 31); @@ -351,30 +347,33 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) } insert_hash(chunk); spin_unlock(&hash_lock); - mutex_unlock(&inode->inotify_mutex); + spin_unlock(&entry->lock); return 0; } /* the first tagged inode becomes root of tree */ static int tag_chunk(struct inode *inode, struct audit_tree *tree) { - struct inotify_watch *watch; + struct fsnotify_mark_entry *old_entry, *chunk_entry; struct audit_tree *owner; struct audit_chunk *chunk, *old; struct node *p; int n; - if (inotify_find_watch(rtree_ih, inode, &watch) < 0) + spin_lock(&inode->i_lock); + old_entry = fsnotify_find_mark_entry(audit_tree_group, inode); + spin_unlock(&inode->i_lock); + if (!old_entry) return create_chunk(inode, tree); - old = container_of(watch, struct audit_chunk, watch); + old = container_of(old_entry, struct audit_chunk, mark); /* are we already there? 
*/ spin_lock(&hash_lock); for (n = 0; n < old->count; n++) { if (old->owners[n].owner == tree) { spin_unlock(&hash_lock); - put_inotify_watch(&old->watch); + fsnotify_put_mark(old_entry); return 0; } } @@ -382,25 +381,44 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) chunk = alloc_chunk(old->count + 1); if (!chunk) { - put_inotify_watch(&old->watch); + fsnotify_put_mark(old_entry); return -ENOMEM; } - mutex_lock(&inode->inotify_mutex); - if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) { - mutex_unlock(&inode->inotify_mutex); - put_inotify_watch(&old->watch); + chunk_entry = &chunk->mark; + + spin_lock(&old_entry->lock); + if (!old_entry->inode) { + /* old_entry is being shot, lets just lie */ + spin_unlock(&old_entry->lock); + fsnotify_put_mark(old_entry); free_chunk(chunk); + return -ENOENT; + } + + fsnotify_duplicate_mark(chunk_entry, old_entry); + if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, 1)) { + spin_unlock(&old_entry->lock); + free_chunk(chunk); + fsnotify_put_mark(old_entry); return -ENOSPC; } + + /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */ + spin_lock(&chunk_entry->lock); spin_lock(&hash_lock); + + /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */ if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; - inotify_evict_watch(&chunk->watch); - mutex_unlock(&inode->inotify_mutex); - put_inotify_watch(&old->watch); - put_inotify_watch(&chunk->watch); + spin_unlock(&chunk_entry->lock); + spin_unlock(&old_entry->lock); + + fsnotify_destroy_mark_by_entry(chunk_entry); + + fsnotify_put_mark(chunk_entry); + fsnotify_put_mark(old_entry); return 0; } list_replace_init(&old->trees, &chunk->trees); @@ -426,10 +444,11 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) list_add(&tree->same_root, &chunk->trees); } spin_unlock(&hash_lock); - inotify_evict_watch(&old->watch); - mutex_unlock(&inode->inotify_mutex); - put_inotify_watch(&old->watch); /* pair to inotify_find_watch */ - put_inotify_watch(&old->watch); /* and kill it */ + spin_unlock(&chunk_entry->lock); + spin_unlock(&old_entry->lock); + fsnotify_destroy_mark_by_entry(old_entry); + fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ + fsnotify_put_mark(old_entry); /* and kill it */ return 0; } @@ -584,7 +603,9 @@ void audit_trim_trees(void) spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) { - struct inode *inode = find_chunk(node)->watch.inode; + struct audit_chunk *chunk = find_chunk(node); + /* this could be NULL if the watch is dieing else where... 
*/ + struct inode *inode = chunk->mark.inode; node->index |= 1U<<31; if (iterate_mounts(compare_root, inode, root_mnt)) node->index &= ~(1U<<31); @@ -846,7 +867,6 @@ void audit_kill_trees(struct list_head *list) * Here comes the stuff asynchronous to auditctl operations */ -/* inode->inotify_mutex is locked */ static void evict_chunk(struct audit_chunk *chunk) { struct audit_tree *owner; @@ -885,35 +905,41 @@ static void evict_chunk(struct audit_chunk *chunk) mutex_unlock(&audit_filter_mutex); } -static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask, - u32 cookie, const char *dname, struct inode *inode) +static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_event *event) { - struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch); + BUG(); + return -EOPNOTSUPP; +} - if (mask & IN_IGNORED) { - evict_chunk(chunk); - put_inotify_watch(watch); - } +static void audit_tree_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group) +{ + struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); + + evict_chunk(chunk); + fsnotify_put_mark(entry); } -static void destroy_watch(struct inotify_watch *watch) +static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask) { - struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch); - call_rcu(&chunk->head, __put_chunk); + return 0; } -static const struct inotify_operations rtree_inotify_ops = { - .handle_event = handle_event, - .destroy_watch = destroy_watch, +static const struct fsnotify_ops audit_tree_ops = { + .handle_event = audit_tree_handle_event, + .should_send_event = audit_tree_send_event, + .free_group_priv = NULL, + .free_event_priv = NULL, + .freeing_mark = audit_tree_freeing_mark, }; static int __init audit_tree_init(void) { int i; - rtree_ih = inotify_init(&rtree_inotify_ops); - if (IS_ERR(rtree_ih)) - audit_panic("cannot initialize inotify handle for rectree watches"); + audit_tree_group = fsnotify_obtain_group(AUDIT_TREE_GROUP_NUM, + 0, &audit_tree_ops); + if (IS_ERR(audit_tree_group)) + audit_panic("cannot initialize fsnotify group for rectree watches"); for (i = 0; i < HASH_SIZE; i++) INIT_LIST_HEAD(&chunk_hash_heads[i]); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 240063c..786901c 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1725,7 +1725,7 @@ static inline void handle_one(const struct inode *inode) struct audit_tree_refs *p; struct audit_chunk *chunk; int count; - if (likely(list_empty(&inode->inotify_watches))) + if (likely(hlist_empty(&inode->i_fsnotify_mark_entries))) return; context = current->audit_context; p = context->trees; @@ -1768,7 +1768,7 @@ retry: seq = read_seqbegin(&rename_lock); for(;;) { struct inode *inode = d->d_inode; - if (inode && unlikely(!list_empty(&inode->inotify_watches))) { + if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_mark_entries))) { struct audit_chunk *chunk; chunk = audit_tree_lookup(inode); if (chunk) { -- cgit v1.1 From 939a67fc4cbab8ca11c90da8a769d7e965d66a9b Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 20:12:06 -0500 Subject: Audit: split audit watch Kconfig Audit watch should depend on CONFIG_AUDIT_SYSCALL and should select FSNOTIFY. This splits the spagetti like mixing of audit_watch and audit_filter code so they can be configured seperately. 
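A hypothetical caller shape, only to illustrate what the split buys: with the CONFIG_AUDIT_WATCH stubs added to kernel/audit.h below, common rule code keeps compiling when watches are configured out and simply sees an error return (the surrounding rule-update code here is illustrative, not taken from this series):

        if (krule->watch) {
                err = audit_add_watch(krule, &list);   /* always -EINVAL without CONFIG_AUDIT_WATCH */
                if (err)
                        return err;
        }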
Signed-off-by: Eric Paris --- kernel/Makefile | 5 +++-- kernel/audit.h | 14 +++++++++++++- 2 files changed, 16 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index 057472f..202df4e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -70,10 +70,11 @@ obj-$(CONFIG_IKCONFIG) += configs.o obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o obj-$(CONFIG_SMP) += stop_machine.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o -obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o +obj-$(CONFIG_AUDIT) += audit.o auditfilter.o obj-$(CONFIG_AUDITSYSCALL) += auditsc.o -obj-$(CONFIG_GCOV_KERNEL) += gcov/ +obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o obj-$(CONFIG_AUDIT_TREE) += audit_tree.o +obj-$(CONFIG_GCOV_KERNEL) += gcov/ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KGDB) += debug/ obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o diff --git a/kernel/audit.h b/kernel/audit.h index 100b454..f7206db 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -103,7 +103,10 @@ extern struct mutex audit_filter_mutex; extern void audit_free_rule_rcu(struct rcu_head *); extern struct list_head audit_filter_list[]; +extern struct audit_entry *audit_dupe_rule(struct audit_krule *old); + /* audit watch functions */ +#ifdef CONFIG_AUDIT_WATCH extern void audit_put_watch(struct audit_watch *watch); extern void audit_get_watch(struct audit_watch *watch); extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op); @@ -111,7 +114,16 @@ extern int audit_add_watch(struct audit_krule *krule, struct list_head **list); extern void audit_remove_watch_rule(struct audit_krule *krule); extern char *audit_watch_path(struct audit_watch *watch); extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev); -extern struct audit_entry *audit_dupe_rule(struct audit_krule *old); +#else +#define audit_put_watch(w) {} +#define audit_get_watch(w) {} +#define audit_to_watch(k, p, l, o) (-EINVAL) +#define audit_add_watch(k, l) (-EINVAL) +#define audit_remove_watch_rule(k) BUG() +#define audit_watch_path(w) "" +#define audit_watch_compare(w, i, d) 0 + +#endif /* CONFIG_AUDIT_WATCH */ #ifdef CONFIG_AUDIT_TREE extern struct audit_chunk *audit_tree_lookup(const struct inode *); -- cgit v1.1 From 1a3aedbce416dfdbd5d5ac14a0edbcf21a62ee50 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 20:12:06 -0500 Subject: Audit: audit watch init should not be before fsnotify init Audit watch init and fsnotify init both use subsys_initcall() but since the audit watch code is linked in before the fsnotify code the audit watch code would be using the fsnotify srcu struct before it was initialized. This patch fixes that problem by moving audit watch init to device_initcall() so it happens after fsnotify is ready. Reported-by: Stephen Rothwell Signed-off-by: Eric Paris Tested-by : Sachin Sant --- kernel/audit_watch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index c44de0c..f8543a4 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -584,4 +584,4 @@ static int __init audit_watch_init(void) } return 0; } -subsys_initcall(audit_watch_init); +device_initcall(audit_watch_init); -- cgit v1.1 From 2dfc1cae4c42b93b831b2417540df2b895ab7108 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 20:30:52 -0500 Subject: inotify: remove inotify in kernel interface nothing uses inotify in the kernel, drop it! 
Signed-off-by: Eric Paris --- kernel/auditsc.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 786901c..853185f 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -65,7 +65,6 @@ #include #include #include -#include #include #include -- cgit v1.1 From 7b0a04fbfb35650941af87728d4891515b4fc179 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:21 -0500 Subject: fsnotify: provide the data type to should_send_event fanotify is only interested in event types which contain enough information to open the original file in the context of the fanotify listener. Since fanotify may not want to send events if that data isn't present we pass the data type to the should_send_event function call so fanotify can express its lack of interest. Signed-off-by: Eric Paris --- kernel/audit_tree.c | 3 ++- kernel/audit_watch.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index a164600..b5417cd 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -919,7 +919,8 @@ static void audit_tree_freeing_mark(struct fsnotify_mark_entry *entry, struct fs fsnotify_put_mark(entry); } -static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask) +static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, + __u32 mask, int data_type) { return 0; } diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index f8543a4..67d8f2f 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -505,7 +505,8 @@ void audit_remove_watch_rule(struct audit_krule *krule) } } -static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask) +static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, + __u32 mask, int data_type) { struct fsnotify_mark_entry *entry; bool send; -- cgit v1.1 From 8112e2d6a7356e8c3ff1f7f3c86f375ed0305705 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:21 -0500 Subject: fsnotify: include data in should_send calls fanotify is going to need to look at file->private_data to know if an event should be sent or not. 
This passes the data (which might be a file, dentry, inode, or none) to the should_send function calls so fanotify can get that information when available Signed-off-by: Eric Paris --- kernel/audit_tree.c | 2 +- kernel/audit_watch.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index b5417cd..e3d63b5 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -920,7 +920,7 @@ static void audit_tree_freeing_mark(struct fsnotify_mark_entry *entry, struct fs } static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, - __u32 mask, int data_type) + __u32 mask, void *data, int data_type) { return 0; } diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 67d8f2f..85c43aa2 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -506,7 +506,7 @@ void audit_remove_watch_rule(struct audit_krule *krule) } static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, - __u32 mask, int data_type) + __u32 mask, void *data, int data_type) { struct fsnotify_mark_entry *entry; bool send; -- cgit v1.1 From 74be0cc82835aecad332a29896b0f212ba893403 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:22 -0500 Subject: fsnotify: remove group_num altogether The original fsnotify interface has a group-num which was intended to be able to find a group after it was added. I no longer think this is a necessary thing to do and so we remove the group_num. Signed-off-by: Eric Paris --- kernel/audit_tree.c | 3 +-- kernel/audit_watch.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index e3d63b5..59065e7 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -937,8 +937,7 @@ static int __init audit_tree_init(void) { int i; - audit_tree_group = fsnotify_obtain_group(AUDIT_TREE_GROUP_NUM, - 0, &audit_tree_ops); + audit_tree_group = fsnotify_obtain_group(0, &audit_tree_ops); if (IS_ERR(audit_tree_group)) audit_panic("cannot initialize fsnotify group for rectree watches"); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 85c43aa2..c500104 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -577,7 +577,7 @@ static const struct fsnotify_ops audit_watch_fsnotify_ops = { static int __init audit_watch_init(void) { - audit_watch_group = fsnotify_obtain_group(AUDIT_WATCH_GROUP_NUM, AUDIT_FS_WATCH, + audit_watch_group = fsnotify_obtain_group(AUDIT_FS_WATCH, &audit_watch_fsnotify_ops); if (IS_ERR(audit_watch_group)) { audit_watch_group = NULL; -- cgit v1.1 From ffab83402f01555a5fa32efb48a4dd0ce8d12ef5 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:22 -0500 Subject: fsnotify: fsnotify_obtain_group should be fsnotify_alloc_group fsnotify_obtain_group was intended to be able to find an already existing group. Nothing uses that functionality. This just renames it to fsnotify_alloc_group so it is clear what it is doing. 
Signed-off-by: Eric Paris --- kernel/audit_tree.c | 2 +- kernel/audit_watch.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 59065e7..813274d 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -937,7 +937,7 @@ static int __init audit_tree_init(void) { int i; - audit_tree_group = fsnotify_obtain_group(0, &audit_tree_ops); + audit_tree_group = fsnotify_alloc_group(0, &audit_tree_ops); if (IS_ERR(audit_tree_group)) audit_panic("cannot initialize fsnotify group for rectree watches"); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index c500104..0f03a6a 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -577,8 +577,8 @@ static const struct fsnotify_ops audit_watch_fsnotify_ops = { static int __init audit_watch_init(void) { - audit_watch_group = fsnotify_obtain_group(AUDIT_FS_WATCH, - &audit_watch_fsnotify_ops); + audit_watch_group = fsnotify_alloc_group(AUDIT_FS_WATCH, + &audit_watch_fsnotify_ops); if (IS_ERR(audit_watch_group)) { audit_watch_group = NULL; audit_panic("cannot create audit fsnotify group"); -- cgit v1.1 From 220d14df0dc587c06b97762829a41157c9375b94 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:22 -0500 Subject: Audit: only set group mask when something is being watched Currently the audit watch group always sets a mask equal to all events it might care about. We instead should only set the group mask if we are actually watching inodes. This should be a perf win when audit watches are compiled in. Signed-off-by: Eric Paris --- kernel/audit_watch.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 0f03a6a..87408b2 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -167,6 +167,8 @@ static struct audit_parent *audit_init_parent(struct nameidata *ndp) return ERR_PTR(ret); } + fsnotify_recalc_group_mask(audit_watch_group); + return parent; } @@ -353,6 +355,9 @@ static void audit_remove_parent_watches(struct audit_parent *parent) mutex_unlock(&audit_filter_mutex); fsnotify_destroy_mark_by_entry(&parent->mark); + + fsnotify_recalc_group_mask(audit_watch_group); + } /* Get path information necessary for adding watches. */ @@ -503,6 +508,9 @@ void audit_remove_watch_rule(struct audit_krule *krule) audit_put_parent(parent); } } + + fsnotify_recalc_group_mask(audit_watch_group); + } static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, @@ -577,8 +585,7 @@ static const struct fsnotify_ops audit_watch_fsnotify_ops = { static int __init audit_watch_init(void) { - audit_watch_group = fsnotify_alloc_group(AUDIT_FS_WATCH, - &audit_watch_fsnotify_ops); + audit_watch_group = fsnotify_alloc_group(0, &audit_watch_fsnotify_ops); if (IS_ERR(audit_watch_group)) { audit_watch_group = NULL; audit_panic("cannot create audit fsnotify group"); -- cgit v1.1 From 0d2e2a1d00d7d23e5bd9bb0935cde7c3d5835c56 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:22 -0500 Subject: fsnotify: drop mask argument from fsnotify_alloc_group Nothing uses the mask argument to fsnotify_alloc_group. This patch drops that argument. 
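Taken together with the "only set group mask when something is being watched" change above, the group mask is no longer seeded at allocation time but recalculated from the marks as they come and go. A condensed sketch using the calls as they appear in the surrounding hunks:

        audit_watch_group = fsnotify_alloc_group(&audit_watch_fsnotify_ops);  /* no mask argument */
        /* ... */
        ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode, 0);
        fsnotify_recalc_group_mask(audit_watch_group);   /* fold the new mark's mask in */
        /* ... */
        fsnotify_destroy_mark_by_entry(&parent->mark);
        fsnotify_recalc_group_mask(audit_watch_group);   /* and shrink it again on removal */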
Signed-off-by: Eric Paris --- kernel/audit_tree.c | 2 +- kernel/audit_watch.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 813274d..04f1688 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -937,7 +937,7 @@ static int __init audit_tree_init(void) { int i; - audit_tree_group = fsnotify_alloc_group(0, &audit_tree_ops); + audit_tree_group = fsnotify_alloc_group(&audit_tree_ops); if (IS_ERR(audit_tree_group)) audit_panic("cannot initialize fsnotify group for rectree watches"); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 87408b2..83d5f96 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -585,7 +585,7 @@ static const struct fsnotify_ops audit_watch_fsnotify_ops = { static int __init audit_watch_init(void) { - audit_watch_group = fsnotify_alloc_group(0, &audit_watch_fsnotify_ops); + audit_watch_group = fsnotify_alloc_group(&audit_watch_fsnotify_ops); if (IS_ERR(audit_watch_group)) { audit_watch_group = NULL; audit_panic("cannot create audit fsnotify group"); -- cgit v1.1 From 3a9fb89f4cd04c23e16397befba92efb5d989b74 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:23 -0500 Subject: fsnotify: include vfsmount in should_send_event when appropriate To ensure that a group will not duplicate events when it receives it based on the vfsmount and the inode should_send_event test we should distinguish those two cases. We pass a vfsmount to this function so groups can make their own determinations. Signed-off-by: Eric Paris --- kernel/audit_tree.c | 3 ++- kernel/audit_watch.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 04f1688..ecf0bf2 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -920,7 +920,8 @@ static void audit_tree_freeing_mark(struct fsnotify_mark_entry *entry, struct fs } static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, - __u32 mask, void *data, int data_type) + struct vfsmount *mnt, __u32 mask, void *data, + int data_type) { return 0; } diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 83d5f96..6304ee5 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -514,7 +514,8 @@ void audit_remove_watch_rule(struct audit_krule *krule) } static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, - __u32 mask, void *data, int data_type) + struct vfsmount *mnt, __u32 mask, void *data, + int data_type) { struct fsnotify_mark_entry *entry; bool send; -- cgit v1.1 From 2823e04de4f1a49087b58ff2bb8f61361ffd9321 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:23 -0500 Subject: fsnotify: put inode specific fields in an fsnotify_mark in a union The addition of marks on vfs mounts will be simplified if the inode specific parts of a mark and the vfsmnt specific parts of a mark are actually in a union so naming can be easy. This patch just implements the inode struct and the union. 
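A rough sketch of the shape this gives the mark, reconstructed only from the accesses visible in these hunks (mask, lock, group, i.inode); the real declaration in the fsnotify headers has more members and a named type for the inode-specific part:

        struct fsnotify_mark_entry {            /* renamed to fsnotify_mark two patches later */
                __u32 mask;                     /* events of interest, e.g. AUDIT_FS_WATCH */
                spinlock_t lock;                /* the entry->lock taken by the callers */
                struct fsnotify_group *group;
                union {
                        struct {
                                struct inode *inode;    /* hence chunk->mark.i.inode */
                                /* ... inode list linkage ... */
                        } i;                    /* inode-attached marks */
                        /* a vfsmount counterpart slots in here later */
                };
                /* refcount, free callback, etc. omitted */
        };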
Signed-off-by: Eric Paris --- kernel/audit_tree.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index ecf0bf2..c21b05d 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -179,9 +179,9 @@ static void insert_hash(struct audit_chunk *chunk) struct fsnotify_mark_entry *entry = &chunk->mark; struct list_head *list; - if (!entry->inode) + if (!entry->i.inode) return; - list = chunk_hash(entry->inode); + list = chunk_hash(entry->i.inode); list_add_rcu(&chunk->hash, list); } @@ -193,7 +193,7 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode) list_for_each_entry_rcu(p, list, hash) { /* mark.inode may have gone NULL, but who cares? */ - if (p->mark.inode == inode) { + if (p->mark.i.inode == inode) { atomic_long_inc(&p->refs); return p; } @@ -233,7 +233,7 @@ static void untag_chunk(struct node *p) spin_unlock(&hash_lock); spin_lock(&entry->lock); - if (chunk->dead || !entry->inode) { + if (chunk->dead || !entry->i.inode) { spin_unlock(&entry->lock); goto out; } @@ -259,7 +259,7 @@ static void untag_chunk(struct node *p) if (!new) goto Fallback; fsnotify_duplicate_mark(&new->mark, entry); - if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, 1)) { + if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, 1)) { free_chunk(new); goto Fallback; } @@ -388,7 +388,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) chunk_entry = &chunk->mark; spin_lock(&old_entry->lock); - if (!old_entry->inode) { + if (!old_entry->i.inode) { /* old_entry is being shot, lets just lie */ spin_unlock(&old_entry->lock); fsnotify_put_mark(old_entry); @@ -397,7 +397,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) } fsnotify_duplicate_mark(chunk_entry, old_entry); - if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, 1)) { + if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, 1)) { spin_unlock(&old_entry->lock); free_chunk(chunk); fsnotify_put_mark(old_entry); @@ -605,7 +605,7 @@ void audit_trim_trees(void) list_for_each_entry(node, &tree->chunks, list) { struct audit_chunk *chunk = find_chunk(node); /* this could be NULL if the watch is dieing else where... */ - struct inode *inode = chunk->mark.inode; + struct inode *inode = chunk->mark.i.inode; node->index |= 1U<<31; if (iterate_mounts(compare_root, inode, root_mnt)) node->index &= ~(1U<<31); -- cgit v1.1 From e61ce86737b4d60521e4e71f9892fe4bdcfb688b Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:24 -0500 Subject: fsnotify: rename fsnotify_mark_entry to just fsnotify_mark The name is long and it serves no real purpose. So rename fsnotify_mark_entry to just fsnotify_mark. 
Signed-off-by: Eric Paris --- kernel/audit_tree.c | 14 +++++++------- kernel/audit_watch.c | 8 ++++---- kernel/auditsc.c | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index c21b05d..f16f909 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -22,7 +22,7 @@ struct audit_tree { struct audit_chunk { struct list_head hash; - struct fsnotify_mark_entry mark; + struct fsnotify_mark mark; struct list_head trees; /* with root here */ int dead; int count; @@ -134,7 +134,7 @@ static void __put_chunk(struct rcu_head *rcu) audit_put_chunk(chunk); } -static void audit_tree_destroy_watch(struct fsnotify_mark_entry *entry) +static void audit_tree_destroy_watch(struct fsnotify_mark *entry) { struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); call_rcu(&chunk->head, __put_chunk); @@ -176,7 +176,7 @@ static inline struct list_head *chunk_hash(const struct inode *inode) /* hash_lock & entry->lock is held by caller */ static void insert_hash(struct audit_chunk *chunk) { - struct fsnotify_mark_entry *entry = &chunk->mark; + struct fsnotify_mark *entry = &chunk->mark; struct list_head *list; if (!entry->i.inode) @@ -222,7 +222,7 @@ static struct audit_chunk *find_chunk(struct node *p) static void untag_chunk(struct node *p) { struct audit_chunk *chunk = find_chunk(p); - struct fsnotify_mark_entry *entry = &chunk->mark; + struct fsnotify_mark *entry = &chunk->mark; struct audit_chunk *new; struct audit_tree *owner; int size = chunk->count - 1; @@ -316,7 +316,7 @@ out: static int create_chunk(struct inode *inode, struct audit_tree *tree) { - struct fsnotify_mark_entry *entry; + struct fsnotify_mark *entry; struct audit_chunk *chunk = alloc_chunk(1); if (!chunk) return -ENOMEM; @@ -354,7 +354,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) /* the first tagged inode becomes root of tree */ static int tag_chunk(struct inode *inode, struct audit_tree *tree) { - struct fsnotify_mark_entry *old_entry, *chunk_entry; + struct fsnotify_mark *old_entry, *chunk_entry; struct audit_tree *owner; struct audit_chunk *chunk, *old; struct node *p; @@ -911,7 +911,7 @@ static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify return -EOPNOTSUPP; } -static void audit_tree_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group) +static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group) { struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 6304ee5..d8cb55a 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -56,7 +56,7 @@ struct audit_watch { struct audit_parent { struct list_head watches; /* anchor for audit_watch->wlist */ - struct fsnotify_mark_entry mark; /* fsnotify mark on the inode */ + struct fsnotify_mark mark; /* fsnotify mark on the inode */ }; /* fsnotify handle. 
*/ @@ -72,7 +72,7 @@ static void audit_free_parent(struct audit_parent *parent) kfree(parent); } -static void audit_watch_free_mark(struct fsnotify_mark_entry *entry) +static void audit_watch_free_mark(struct fsnotify_mark *entry) { struct audit_parent *parent; @@ -99,7 +99,7 @@ static void audit_put_parent(struct audit_parent *parent) static inline struct audit_parent *audit_find_parent(struct inode *inode) { struct audit_parent *parent = NULL; - struct fsnotify_mark_entry *entry; + struct fsnotify_mark *entry; spin_lock(&inode->i_lock); entry = fsnotify_find_mark_entry(audit_watch_group, inode); @@ -517,7 +517,7 @@ static bool audit_watch_should_send_event(struct fsnotify_group *group, struct i struct vfsmount *mnt, __u32 mask, void *data, int data_type) { - struct fsnotify_mark_entry *entry; + struct fsnotify_mark *entry; bool send; spin_lock(&inode->i_lock); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 853185f..b87a63b 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1724,7 +1724,7 @@ static inline void handle_one(const struct inode *inode) struct audit_tree_refs *p; struct audit_chunk *chunk; int count; - if (likely(hlist_empty(&inode->i_fsnotify_mark_entries))) + if (likely(hlist_empty(&inode->i_fsnotify_marks))) return; context = current->audit_context; p = context->trees; @@ -1767,7 +1767,7 @@ retry: seq = read_seqbegin(&rename_lock); for(;;) { struct inode *inode = d->d_inode; - if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_mark_entries))) { + if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) { struct audit_chunk *chunk; chunk = audit_tree_lookup(inode); if (chunk) { -- cgit v1.1 From d07754412f9cdc2f4a99318d5ee81ace6715ea99 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:24 -0500 Subject: fsnotify: rename fsnotify_find_mark_entry to fsnotify_find_mark the _entry portion of fsnotify functions is useless. Drop it. 
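Whatever the helper is called, the reference semantics seen in the audit callers stay the same: a successful find returns the mark with a reference held, which the caller must eventually drop (or hand on, as audit_find_parent() does). A minimal usage sketch as the code stands here, where the caller still wraps the lookup in inode->i_lock; the very next patch moves that locking inside the helper:

        spin_lock(&inode->i_lock);
        entry = fsnotify_find_mark(group, inode);   /* returns with a reference held */
        spin_unlock(&inode->i_lock);
        if (entry) {
                /* use container_of(entry, ...) to reach the embedding object */
                fsnotify_put_mark(entry);           /* drop the reference find took */
        }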
Signed-off-by: Eric Paris --- kernel/audit_tree.c | 12 ++++++------ kernel/audit_watch.c | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index f16f909..b20fb05 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -250,7 +250,7 @@ static void untag_chunk(struct node *p) list_del_rcu(&chunk->hash); spin_unlock(&hash_lock); spin_unlock(&entry->lock); - fsnotify_destroy_mark_by_entry(entry); + fsnotify_destroy_mark(entry); fsnotify_put_mark(entry); goto out; } @@ -293,7 +293,7 @@ static void untag_chunk(struct node *p) owner->root = new; spin_unlock(&hash_lock); spin_unlock(&entry->lock); - fsnotify_destroy_mark_by_entry(entry); + fsnotify_destroy_mark(entry); fsnotify_put_mark(entry); goto out; @@ -333,7 +333,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&hash_lock); chunk->dead = 1; spin_unlock(&entry->lock); - fsnotify_destroy_mark_by_entry(entry); + fsnotify_destroy_mark(entry); fsnotify_put_mark(entry); return 0; } @@ -361,7 +361,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) int n; spin_lock(&inode->i_lock); - old_entry = fsnotify_find_mark_entry(audit_tree_group, inode); + old_entry = fsnotify_find_mark(audit_tree_group, inode); spin_unlock(&inode->i_lock); if (!old_entry) return create_chunk(inode, tree); @@ -415,7 +415,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); - fsnotify_destroy_mark_by_entry(chunk_entry); + fsnotify_destroy_mark(chunk_entry); fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); @@ -446,7 +446,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&hash_lock); spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); - fsnotify_destroy_mark_by_entry(old_entry); + fsnotify_destroy_mark(old_entry); fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ fsnotify_put_mark(old_entry); /* and kill it */ return 0; diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index d8cb55a..24ecbeb 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -102,7 +102,7 @@ static inline struct audit_parent *audit_find_parent(struct inode *inode) struct fsnotify_mark *entry; spin_lock(&inode->i_lock); - entry = fsnotify_find_mark_entry(audit_watch_group, inode); + entry = fsnotify_find_mark(audit_watch_group, inode); spin_unlock(&inode->i_lock); if (entry) @@ -354,7 +354,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent) } mutex_unlock(&audit_filter_mutex); - fsnotify_destroy_mark_by_entry(&parent->mark); + fsnotify_destroy_mark(&parent->mark); fsnotify_recalc_group_mask(audit_watch_group); @@ -504,7 +504,7 @@ void audit_remove_watch_rule(struct audit_krule *krule) if (list_empty(&parent->watches)) { audit_get_parent(parent); - fsnotify_destroy_mark_by_entry(&parent->mark); + fsnotify_destroy_mark(&parent->mark); audit_put_parent(parent); } } @@ -521,7 +521,7 @@ static bool audit_watch_should_send_event(struct fsnotify_group *group, struct i bool send; spin_lock(&inode->i_lock); - entry = fsnotify_find_mark_entry(group, inode); + entry = fsnotify_find_mark(group, inode); spin_unlock(&inode->i_lock); if (!entry) return false; -- cgit v1.1 From 35566087099c3ff8901d65ee98af56347ee66e5a Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Thu, 17 Dec 2009 21:24:25 -0500 Subject: fsnotify: take inode->i_lock inside fsnotify_find_mark_entry() 
All callers to fsnotify_find_mark_entry() except one take and release inode->i_lock around the call. Take the lock inside fsnotify_find_mark_entry() instead. Signed-off-by: Andreas Gruenbacher Signed-off-by: Eric Paris --- kernel/audit_tree.c | 2 -- kernel/audit_watch.c | 5 ----- 2 files changed, 7 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index b20fb05..80f8ac3 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -360,9 +360,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) struct node *p; int n; - spin_lock(&inode->i_lock); old_entry = fsnotify_find_mark(audit_tree_group, inode); - spin_unlock(&inode->i_lock); if (!old_entry) return create_chunk(inode, tree); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 24ecbeb..d85fa53 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -101,10 +101,7 @@ static inline struct audit_parent *audit_find_parent(struct inode *inode) struct audit_parent *parent = NULL; struct fsnotify_mark *entry; - spin_lock(&inode->i_lock); entry = fsnotify_find_mark(audit_watch_group, inode); - spin_unlock(&inode->i_lock); - if (entry) parent = container_of(entry, struct audit_parent, mark); @@ -520,9 +517,7 @@ static bool audit_watch_should_send_event(struct fsnotify_group *group, struct i struct fsnotify_mark *entry; bool send; - spin_lock(&inode->i_lock); entry = fsnotify_find_mark(group, inode); - spin_unlock(&inode->i_lock); if (!entry) return false; -- cgit v1.1 From 11637e4b7dc098e9a863f0a619d55ebc60f5949e Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:25 -0500 Subject: fanotify: fanotify_init syscall declaration This patch defines a new syscall fanotify_init() of the form: int sys_fanotify_init(unsigned int flags, unsigned int event_f_flags, unsigned int priority) This syscall is used to create and fanotify group. This is very similar to the inotify_init() syscall. Signed-off-by: Eric Paris --- kernel/sys_ni.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 70f2ea7..2c4adc2 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -181,3 +181,6 @@ cond_syscall(sys_eventfd2); /* performance counters: */ cond_syscall(sys_perf_event_open); + +/* fanotify! */ +cond_syscall(sys_fanotify_init); -- cgit v1.1 From bbaa4168b2d2d8cc674e6d35806e8426aef464b8 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:26 -0500 Subject: fanotify: sys_fanotify_mark declartion This patch simply declares the new sys_fanotify_mark syscall int fanotify_mark(int fanotify_fd, unsigned int flags, u64_mask, int dfd const char *pathname) Signed-off-by: Eric Paris --- kernel/sys_ni.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 2c4adc2..bad369e 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -184,3 +184,4 @@ cond_syscall(sys_perf_event_open); /* fanotify! */ cond_syscall(sys_fanotify_init); +cond_syscall(sys_fanotify_mark); -- cgit v1.1 From 5444e2981c31d0ed7465475e451b8437084337e5 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 17 Dec 2009 21:24:27 -0500 Subject: fsnotify: split generic and inode specific mark code currently all marking is done by functions in inode-mark.c. 
Some of this is pretty generic and should be instead done in a generic function and we should only put the inode specific code in inode-mark.c Signed-off-by: Eric Paris --- kernel/audit_tree.c | 8 ++++---- kernel/audit_watch.c | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 80f8ac3..cfb97d7 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -259,7 +259,7 @@ static void untag_chunk(struct node *p) if (!new) goto Fallback; fsnotify_duplicate_mark(&new->mark, entry); - if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, 1)) { + if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) { free_chunk(new); goto Fallback; } @@ -322,7 +322,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) return -ENOMEM; entry = &chunk->mark; - if (fsnotify_add_mark(entry, audit_tree_group, inode, 0)) { + if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) { free_chunk(chunk); return -ENOSPC; } @@ -360,7 +360,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) struct node *p; int n; - old_entry = fsnotify_find_mark(audit_tree_group, inode); + old_entry = fsnotify_find_inode_mark(audit_tree_group, inode); if (!old_entry) return create_chunk(inode, tree); @@ -395,7 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) } fsnotify_duplicate_mark(chunk_entry, old_entry); - if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, 1)) { + if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) { spin_unlock(&old_entry->lock); free_chunk(chunk); fsnotify_put_mark(old_entry); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index d85fa53..7499397 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -101,7 +101,7 @@ static inline struct audit_parent *audit_find_parent(struct inode *inode) struct audit_parent *parent = NULL; struct fsnotify_mark *entry; - entry = fsnotify_find_mark(audit_watch_group, inode); + entry = fsnotify_find_inode_mark(audit_watch_group, inode); if (entry) parent = container_of(entry, struct audit_parent, mark); @@ -158,7 +158,7 @@ static struct audit_parent *audit_init_parent(struct nameidata *ndp) fsnotify_init_mark(&parent->mark, audit_watch_free_mark); parent->mark.mask = AUDIT_FS_WATCH; - ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode, 0); + ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode, NULL, 0); if (ret < 0) { audit_free_parent(parent); return ERR_PTR(ret); @@ -517,7 +517,7 @@ static bool audit_watch_should_send_event(struct fsnotify_group *group, struct i struct fsnotify_mark *entry; bool send; - entry = fsnotify_find_mark(group, inode); + entry = fsnotify_find_inode_mark(group, inode); if (!entry) return false; -- cgit v1.1 From 6e006701ccc1590500186ef21e074bd900c5dd67 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 20 Jan 2010 22:27:56 +0200 Subject: dnotify: move dir_notify_enable declaration Move dir_notify_enable declaration to where it belongs -- dnotify.h . 
Signed-off-by: Alexey Dobriyan Signed-off-by: Eric Paris --- kernel/sysctl.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index d24f761f..7b983db 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include -- cgit v1.1 From d14f1729483fad3a8817fbbcbd017678b7d1ad26 Mon Sep 17 00:00:00 2001 From: Dave Young Date: Thu, 25 Feb 2010 20:28:57 -0500 Subject: sysctl extern cleanup: inotify Extern declarations in sysctl.c should be move to their own head file, and then include them in relavant .c files. Move inotify_table extern declaration to linux/inotify.h Signed-off-by: Dave Young Signed-off-by: Andrew Morton Signed-off-by: Eric Paris --- kernel/sysctl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 7b983db..fe30db7 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -131,6 +131,9 @@ static int min_percpu_pagelist_fract = 8; static int ngroups_max = NGROUPS_MAX; +#ifdef CONFIG_INOTIFY_USER +#include +#endif #ifdef CONFIG_SPARC #include #endif @@ -207,9 +210,6 @@ static struct ctl_table fs_table[]; static struct ctl_table debug_table[]; static struct ctl_table dev_table[]; extern struct ctl_table random_table[]; -#ifdef CONFIG_INOTIFY_USER -extern struct ctl_table inotify_table[]; -#endif #ifdef CONFIG_EPOLL extern struct ctl_table epoll_table[]; #endif -- cgit v1.1 From 3bcf3860a4ff9bbc522820b4b765e65e4deceb3e Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 28 Jul 2010 10:18:37 -0400 Subject: fsnotify: store struct file not struct path Al explains that calling dentry_open() with a mnt/dentry pair is only garunteed to be safe if they are already used in an open struct file. To make sure this is the case don't store and use a struct path in fsnotify, always use a struct file. Signed-off-by: Eric Paris --- kernel/audit_watch.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 7499397..b955a22 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -545,8 +545,8 @@ static int audit_watch_handle_event(struct fsnotify_group *group, struct fsnotif return 0; switch (event->data_type) { - case (FSNOTIFY_EVENT_PATH): - inode = event->path.dentry->d_inode; + case (FSNOTIFY_EVENT_FILE): + inode = event->file->f_path.dentry->d_inode; break; case (FSNOTIFY_EVENT_INODE): inode = event->inode; -- cgit v1.1 From 3a9b16b407f10b2a771bcae13fb5791e527d6bcf Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 28 Jul 2010 10:18:38 -0400 Subject: fsnotify: send fsnotify_mark to groups in event handling functions With the change of fsnotify to use srcu walking the marks list instead of walking the global groups list we now know the mark in question. The code can send the mark to the group's handling functions and the groups won't have to find those marks themselves. 
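Concretely, as the audit hunks below show, the two group callbacks grow a mark parameter; the function names here are placeholders, only the parameter lists are the point:

        static bool example_should_send_event(struct fsnotify_group *group, struct inode *inode,
                                              struct vfsmount *mnt, struct fsnotify_mark *mark,
                                              __u32 mask, void *data, int data_type);

        static int example_handle_event(struct fsnotify_group *group,
                                        struct fsnotify_mark *mark,
                                        struct fsnotify_event *event);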
Signed-off-by: Eric Paris --- kernel/audit_tree.c | 8 +++++--- kernel/audit_watch.c | 8 +++++--- 2 files changed, 10 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index cfb97d7..584b943 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -903,7 +903,9 @@ static void evict_chunk(struct audit_chunk *chunk) mutex_unlock(&audit_filter_mutex); } -static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_event *event) +static int audit_tree_handle_event(struct fsnotify_group *group, + struct fsnotify_mark *mark, + struct fsnotify_event *event) { BUG(); return -EOPNOTSUPP; @@ -918,8 +920,8 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify } static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, - struct vfsmount *mnt, __u32 mask, void *data, - int data_type) + struct vfsmount *mnt, struct fsnotify_mark *mark, + __u32 mask, void *data, int data_type) { return 0; } diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index b955a22..4d5ea03 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -511,8 +511,8 @@ void audit_remove_watch_rule(struct audit_krule *krule) } static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, - struct vfsmount *mnt, __u32 mask, void *data, - int data_type) + struct vfsmount *mnt, struct fsnotify_mark *mark, + __u32 mask, void *data, int data_type) { struct fsnotify_mark *entry; bool send; @@ -531,7 +531,9 @@ static bool audit_watch_should_send_event(struct fsnotify_group *group, struct i } /* Update watch data in audit rules based on fsnotify events. */ -static int audit_watch_handle_event(struct fsnotify_group *group, struct fsnotify_event *event) +static int audit_watch_handle_event(struct fsnotify_group *group, + struct fsnotify_mark *mark, + struct fsnotify_event *event) { struct inode *inode; __u32 mask = event->mask; -- cgit v1.1 From 4cd76a47924cd966799402d0f2bba356cde5c1b3 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 28 Jul 2010 10:18:38 -0400 Subject: audit: use the mark in handler functions audit now gets a mark in the should_send_event and handle_event functions. Rather than look up the mark themselves audit should just use the mark it was handed. 
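The lookup can go away because the mark fsnotify hands in is embedded in the audit_parent, so container_of() recovers the parent directly and no extra reference needs to be taken or dropped. Roughly (struct shown in simplified form, other members elided):

    struct audit_parent {
        struct list_head     watches;   /* watches on this parent (assumed) */
        struct fsnotify_mark mark;      /* what fsnotify passes back to the callbacks */
    };

    /* in the handler: */
    parent = container_of(mark, struct audit_parent, mark);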
Signed-off-by: Eric Paris --- kernel/audit_watch.c | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 4d5ea03..9173bcf 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -514,18 +514,10 @@ static bool audit_watch_should_send_event(struct fsnotify_group *group, struct i struct vfsmount *mnt, struct fsnotify_mark *mark, __u32 mask, void *data, int data_type) { - struct fsnotify_mark *entry; bool send; - entry = fsnotify_find_inode_mark(group, inode); - if (!entry) - return false; - mask = (mask & ~FS_EVENT_ON_CHILD); - send = (entry->mask & mask); - - /* find took a reference */ - fsnotify_put_mark(entry); + send = (mark->mask & mask); return send; } @@ -540,11 +532,9 @@ static int audit_watch_handle_event(struct fsnotify_group *group, const char *dname = event->file_name; struct audit_parent *parent; - BUG_ON(group != audit_watch_group); + parent = container_of(mark, struct audit_parent, mark); - parent = audit_find_parent(event->to_tell); - if (unlikely(!parent)) - return 0; + BUG_ON(group != audit_watch_group); switch (event->data_type) { case (FSNOTIFY_EVENT_FILE): @@ -565,10 +555,6 @@ static int audit_watch_handle_event(struct fsnotify_group *group, audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1); else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF)) audit_remove_parent_watches(parent); - /* moved put_inotify_watch to freeing mark */ - - /* matched the ref taken by audit_find_parent */ - audit_put_parent(parent); return 0; } -- cgit v1.1 From 2612abb51b11ffd2d75c472b11178115f5808909 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 28 Jul 2010 10:18:39 -0400 Subject: fsnotify: cleanup should_send_event The change to use srcu and walk the object list rather than the global fsnotify_group list means that should_send_event is no longer needed for a number of groups and can be simplified for others. Do that. Signed-off-by: Eric Paris --- kernel/audit_tree.c | 2 +- kernel/audit_watch.c | 7 +------ 2 files changed, 2 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 584b943..2abb99f 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -923,7 +923,7 @@ static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *in struct vfsmount *mnt, struct fsnotify_mark *mark, __u32 mask, void *data, int data_type) { - return 0; + return false; } static const struct fsnotify_ops audit_tree_ops = { diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 9173bcf..097a61c 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -514,12 +514,7 @@ static bool audit_watch_should_send_event(struct fsnotify_group *group, struct i struct vfsmount *mnt, struct fsnotify_mark *mark, __u32 mask, void *data, int data_type) { - bool send; - - mask = (mask & ~FS_EVENT_ON_CHILD); - send = (mark->mask & mask); - - return send; + return true; } /* Update watch data in audit rules based on fsnotify events. */ -- cgit v1.1 From 43709a288ed03aa0e2979ab63dd089b3889645c4 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 28 Jul 2010 10:18:39 -0400 Subject: fsnotify: remove group->mask group->mask is now useless. It was originally a shortcut for fsnotify to save on performance. These checks are now redundant, so we remove them. 
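For context, the retired shortcut worked roughly like this: each group cached the union of its marks' masks (kept current by fsnotify_recalc_group_mask()), and delivery skipped any group whose cached mask did not cover the event. With the srcu walk of per-object mark lists the per-mark mask is consulted directly, so both the cached union and the recalculation calls removed below become dead weight. A sketch of the old check, not current code:

    /* old scheme: consult the group's cached union of mark masks */
    if (!(group->mask & event_mask))
        continue;   /* group cannot be interested, skip it */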
Signed-off-by: Eric Paris --- kernel/audit_watch.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 097a61c..1b87e75 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -164,8 +164,6 @@ static struct audit_parent *audit_init_parent(struct nameidata *ndp) return ERR_PTR(ret); } - fsnotify_recalc_group_mask(audit_watch_group); - return parent; } @@ -352,9 +350,6 @@ static void audit_remove_parent_watches(struct audit_parent *parent) mutex_unlock(&audit_filter_mutex); fsnotify_destroy_mark(&parent->mark); - - fsnotify_recalc_group_mask(audit_watch_group); - } /* Get path information necessary for adding watches. */ @@ -505,9 +500,6 @@ void audit_remove_watch_rule(struct audit_krule *krule) audit_put_parent(parent); } } - - fsnotify_recalc_group_mask(audit_watch_group); - } static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, -- cgit v1.1 From ce8f76fb7320297ccbe7c950fd9a2d727dd6a5a0 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 28 Jul 2010 10:18:39 -0400 Subject: fsnotify: pass both the vfsmount mark and inode mark should_send_event() and handle_event() will both need to look up the inode event if they get a vfsmount event. Lets just pass both at the same time since we have them both after walking the lists in lockstep. Signed-off-by: Eric Paris --- kernel/audit_tree.c | 6 ++++-- kernel/audit_watch.c | 8 +++++--- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 2abb99f..781ab7f 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -904,7 +904,8 @@ static void evict_chunk(struct audit_chunk *chunk) } static int audit_tree_handle_event(struct fsnotify_group *group, - struct fsnotify_mark *mark, + struct fsnotify_mark *inode_mark, + struct fsnotify_mark *vfsmonut_mark, struct fsnotify_event *event) { BUG(); @@ -920,7 +921,8 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify } static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, - struct vfsmount *mnt, struct fsnotify_mark *mark, + struct vfsmount *mnt, struct fsnotify_mark *inode_mark, + struct fsnotify_mark *vfsmount_mark, __u32 mask, void *data, int data_type) { return false; diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 1b87e75..a273cf3 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -503,7 +503,8 @@ void audit_remove_watch_rule(struct audit_krule *krule) } static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, - struct vfsmount *mnt, struct fsnotify_mark *mark, + struct vfsmount *mnt, struct fsnotify_mark *inode_mark, + struct fsnotify_mark *vfsmount_mark, __u32 mask, void *data, int data_type) { return true; @@ -511,7 +512,8 @@ static bool audit_watch_should_send_event(struct fsnotify_group *group, struct i /* Update watch data in audit rules based on fsnotify events. 
*/ static int audit_watch_handle_event(struct fsnotify_group *group, - struct fsnotify_mark *mark, + struct fsnotify_mark *inode_mark, + struct fsnotify_mark *vfsmount_mark, struct fsnotify_event *event) { struct inode *inode; @@ -519,7 +521,7 @@ static int audit_watch_handle_event(struct fsnotify_group *group, const char *dname = event->file_name; struct audit_parent *parent; - parent = container_of(mark, struct audit_parent, mark); + parent = container_of(inode_mark, struct audit_parent, mark); BUG_ON(group != audit_watch_group); -- cgit v1.1 From 1968f5eed54ce47bde488fd9a450912e4a2d7138 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 28 Jul 2010 10:18:39 -0400 Subject: fanotify: use both marks when possible fanotify currently, when given a vfsmount_mark will look up (if it exists) the corresponding inode mark. This patch drops that lookup and uses the mark provided. Signed-off-by: Eric Paris --- kernel/audit_tree.c | 2 +- kernel/audit_watch.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 781ab7f..7f18d3a 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -921,7 +921,7 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify } static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, - struct vfsmount *mnt, struct fsnotify_mark *inode_mark, + struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmount_mark, __u32 mask, void *data, int data_type) { diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index a273cf3..6bf2306 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -503,7 +503,7 @@ void audit_remove_watch_rule(struct audit_krule *krule) } static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, - struct vfsmount *mnt, struct fsnotify_mark *inode_mark, + struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmount_mark, __u32 mask, void *data, int data_type) { -- cgit v1.1 From 157b1a23856b9fb7cc3d19fa2ddc650b502bab3d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 29 Jul 2010 10:22:48 +0200 Subject: kgdb: Do not access xtime directly The xtime cleanup missed the kgdb access to xtime. Fix it. Signed-off-by: Thomas Gleixner --- kernel/debug/kdb/kdb_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index ebe4a28..a7b9f8d 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2440,6 +2440,7 @@ static void kdb_sysinfo(struct sysinfo *val) */ static int kdb_summary(int argc, const char **argv) { + struct timespec now; struct kdb_tm tm; struct sysinfo val; @@ -2454,7 +2455,8 @@ static int kdb_summary(int argc, const char **argv) kdb_printf("domainname %s\n", init_uts_ns.name.domainname); kdb_printf("ccversion %s\n", __stringify(CCVERSION)); - kdb_gmtime(&xtime, &tm); + now = __current_kernel_time(); + kdb_gmtime(&now, &tm); kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d " "tz_minuteswest %d\n", 1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday, -- cgit v1.1 From 685fd0b4ea3f0f1d5385610b0d5b57775a8d5842 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Thu, 29 Jul 2010 11:16:32 +0100 Subject: irq: Add new IRQ flag IRQF_NO_SUSPEND A small number of users of IRQF_TIMER are using it for the implied no suspend behaviour on interrupts which are not timer interrupts. 
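The rework described in the next sentence presumably lands as flag definitions along these lines (a sketch of include/linux/interrupt.h; the numeric values are assumptions):

    #define __IRQF_TIMER      0x00000200    /* genuinely a timer interrupt */
    #define IRQF_NO_SUSPEND   0x00004000    /* keep the IRQ enabled across suspend */
    #define IRQF_TIMER        (__IRQF_TIMER | IRQF_NO_SUSPEND)

    /* non-timer users that only want the suspend behaviour request
     * IRQF_NO_SUSPEND instead of (ab)using IRQF_TIMER */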
Therefore add a new IRQF_NO_SUSPEND flag, rename IRQF_TIMER to __IRQF_TIMER and redefine IRQF_TIMER in terms of these new flags. Signed-off-by: Ian Campbell Cc: Jeremy Fitzhardinge Cc: Dmitry Torokhov Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Grant Likely Cc: xen-devel@lists.xensource.com Cc: linux-input@vger.kernel.org Cc: linuxppc-dev@ozlabs.org Cc: devicetree-discuss@lists.ozlabs.org LKML-Reference: <1280398595-29708-1-git-send-email-ian.campbell@citrix.com> Signed-off-by: Thomas Gleixner --- kernel/irq/manage.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index e149748..c3003e9 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -216,7 +216,7 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) { if (suspend) { - if (!desc->action || (desc->action->flags & IRQF_TIMER)) + if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) return; desc->status |= IRQ_SUSPENDED; } -- cgit v1.1 From e6cc11707661770ca2bd4db4b0256d28f48e7541 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:14:28 +0200 Subject: padata: Rename padata_alloc functions We rename padata_alloc to padata_alloc_possible because this function allocates a padata_instance and uses the cpu_possible mask for parallel and serial workers. Also we rename __padata_alloc to padata_alloc to avoid to export underlined functions. Underlined functions are considered to be private to padata. Users are updated accordingly. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 7f895e2..12860bc 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -1060,29 +1060,29 @@ static struct kobj_type padata_attr_type = { }; /** - * padata_alloc - Allocate and initialize padata instance. - * Use default cpumask(cpu_possible_mask) - * for serial and parallel workes. + * padata_alloc_possible - Allocate and initialize padata instance. + * Use the cpu_possible_mask for serial and + * parallel workers. * * @wq: workqueue to use for the allocated padata instance */ -struct padata_instance *padata_alloc(struct workqueue_struct *wq) +struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) { - return __padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); + return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); } -EXPORT_SYMBOL(padata_alloc); +EXPORT_SYMBOL(padata_alloc_possible); /** - * __padata_alloc - allocate and initialize a padata instance - * and specify cpumasks for serial and parallel workers. + * padata_alloc - allocate and initialize a padata instance and specify + * cpumasks for serial and parallel workers. 
* * @wq: workqueue to use for the allocated padata instance * @pcpumask: cpumask that will be used for padata parallelization * @cbcpumask: cpumask that will be used for padata serialization */ -struct padata_instance *__padata_alloc(struct workqueue_struct *wq, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask) +struct padata_instance *padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) { struct padata_instance *pinst; struct parallel_data *pd = NULL; @@ -1138,7 +1138,7 @@ err_free_inst: err: return NULL; } -EXPORT_SYMBOL(__padata_alloc); +EXPORT_SYMBOL(padata_alloc); /** * padata_free - free a padata instance -- cgit v1.1 From 65ff577e6b6e482ee9de3569e058edebdc02f069 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:15:06 +0200 Subject: padata: Rearrange set_cpumask functions padata_set_cpumask needs to be protected by a lock. We make __padata_set_cpumasks unlocked and static. So this function can be used by the exported and locked padata_set_cpumask and padata_set_cpumasks functions. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 117 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 67 insertions(+), 50 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 12860bc..4987203 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -623,6 +623,66 @@ int padata_get_cpumask(struct padata_instance *pinst, } EXPORT_SYMBOL(padata_get_cpumask); +static int __padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, + cpumask_var_t cbcpumask) +{ + int valid; + struct parallel_data *pd; + + valid = padata_validate_cpumask(pinst, pcpumask); + if (!valid) { + __padata_stop(pinst); + goto out_replace; + } + + valid = padata_validate_cpumask(pinst, cbcpumask); + if (!valid) + __padata_stop(pinst); + +out_replace: + pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); + if (!pd) + return -ENOMEM; + + cpumask_copy(pinst->cpumask.pcpu, pcpumask); + cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); + + padata_replace(pinst, pd); + + if (valid) + __padata_start(pinst); + + return 0; +} + +/** + * padata_set_cpumasks - Set both parallel and serial cpumasks. The first + * one is used by parallel workers and the second one + * by the wokers doing serialization. + * + * @pinst: padata instance + * @pcpumask: the cpumask to use for parallel workers + * @cbcpumask: the cpumsak to use for serial workers + */ +int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, + cpumask_var_t cbcpumask) +{ + int err; + + mutex_lock(&pinst->lock); + get_online_cpus(); + + err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask); + + put_online_cpus(); + mutex_unlock(&pinst->lock); + + return err; + +} +EXPORT_SYMBOL(padata_set_cpumasks); + /** * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value * equivalent to @cpumask. 
@@ -636,6 +696,10 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask) { struct cpumask *serial_mask, *parallel_mask; + int err = -EINVAL; + + mutex_lock(&pinst->lock); + get_online_cpus(); switch (cpumask_type) { case PADATA_CPU_PARALLEL: @@ -647,65 +711,18 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, serial_mask = cpumask; break; default: - return -EINVAL; + goto out; } - return __padata_set_cpumasks(pinst, parallel_mask, serial_mask); -} -EXPORT_SYMBOL(padata_set_cpumask); - -/** - * __padata_set_cpumasks - Set both parallel and serial cpumasks. The first - * one is used by parallel workers and the second one - * by the wokers doing serialization. - * - * @pinst: padata instance - * @pcpumask: the cpumask to use for parallel workers - * @cbcpumask: the cpumsak to use for serial workers - */ -int __padata_set_cpumasks(struct padata_instance *pinst, - cpumask_var_t pcpumask, cpumask_var_t cbcpumask) -{ - int valid; - int err = 0; - struct parallel_data *pd = NULL; - - mutex_lock(&pinst->lock); - get_online_cpus(); - - valid = padata_validate_cpumask(pinst, pcpumask); - if (!valid) { - __padata_stop(pinst); - goto out_replace; - } - - valid = padata_validate_cpumask(pinst, cbcpumask); - if (!valid) - __padata_stop(pinst); - -out_replace: - pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); - if (!pd) { - err = -ENOMEM; - goto out; - } - - cpumask_copy(pinst->cpumask.pcpu, pcpumask); - cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); - - padata_replace(pinst, pd); - - if (valid) - __padata_start(pinst); + err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask); out: put_online_cpus(); mutex_unlock(&pinst->lock); return err; - } -EXPORT_SYMBOL(__padata_set_cpumasks); +EXPORT_SYMBOL(padata_set_cpumask); static int __padata_add_cpu(struct padata_instance *pinst, int cpu) { -- cgit v1.1 From c635696c7c0fbc720698dbec34bb83e53df6a967 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:15:50 +0200 Subject: padata: Pass the padata cpumasks to the cpumask_change_notifier chain We pass a pointer to the new padata cpumasks to the cpumask_change_notifier chain. So users can access the cpumasks without the need of an extra padata_get_cpumask function. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 4987203..1c8c1d1 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -538,7 +538,8 @@ static void padata_replace(struct padata_instance *pinst, if (notification_mask) blocking_notifier_call_chain(&pinst->cpumask_change_notifier, - notification_mask, pinst); + notification_mask, + &pd_new->cpumask); pinst->flags &= ~PADATA_RESET; } -- cgit v1.1 From 0500e9b3f11ce84fc6ee48a3e29909145e58ba48 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:19:27 +0200 Subject: padata: Remove padata_get_cpumask A function that copies the padata cpumasks to a user buffer is a bit error prone. The cpumask can change any time so we can't be sure to have the right cpumask when using this function. A user who is interested in the padata cpumasks should register to the padata cpumask notifier chain instead. Users of padata_get_cpumask are already updated, so we can remove it. 
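What a padata user does instead is register on the instance's cpumask change notifier and use the masks it is handed, which (per the previous patch) point at the new parallel_data cpumasks. A sketch of such a callback; the helper names and the registration call are illustrative assumptions:

    static int my_padata_cpumask_change(struct notifier_block *nb,
                                        unsigned long mask_type, void *data)
    {
        struct padata_cpumask *new_masks = data;   /* &pd_new->cpumask, see above */

        if (mask_type & PADATA_CPU_PARALLEL)
            rebuild_parallel_state(new_masks->pcpu);   /* hypothetical helper */
        if (mask_type & PADATA_CPU_SERIAL)
            rebuild_serial_state(new_masks->cbcpu);    /* hypothetical helper */
        return NOTIFY_OK;
    }

    /* registered with something like padata_register_cpumask_notifier(pinst, &nb) */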
Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 35 ----------------------------------- 1 file changed, 35 deletions(-) (limited to 'kernel') diff --git a/kernel/padata.c b/kernel/padata.c index 1c8c1d1..fd46792 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -589,41 +589,6 @@ static bool padata_validate_cpumask(struct padata_instance *pinst, return true; } -/** - * padata_get_cpumask: Fetch serial or parallel cpumask from the - * given padata instance and copy it to @out_mask - * - * @pinst: A pointer to padata instance - * @cpumask_type: Specifies which cpumask will be copied. - * Possible values are PADATA_CPU_SERIAL *or* PADATA_CPU_PARALLEL - * corresponding to serial and parallel cpumask respectively. - * @out_mask: A pointer to cpumask structure where selected - * cpumask will be copied. - */ -int padata_get_cpumask(struct padata_instance *pinst, - int cpumask_type, struct cpumask *out_mask) -{ - struct parallel_data *pd; - int ret = 0; - - rcu_read_lock_bh(); - pd = rcu_dereference(pinst->pd); - switch (cpumask_type) { - case PADATA_CPU_SERIAL: - cpumask_copy(out_mask, pd->cpumask.cbcpu); - break; - case PADATA_CPU_PARALLEL: - cpumask_copy(out_mask, pd->cpumask.pcpu); - break; - default: - ret = -EINVAL; - } - - rcu_read_unlock_bh(); - return ret; -} -EXPORT_SYMBOL(padata_get_cpumask); - static int __padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, cpumask_var_t cbcpumask) -- cgit v1.1 From 098849516dd522a343e659740c8f1394a5b7fa69 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 1 Aug 2010 11:50:12 +0200 Subject: workqueue: explain for_each_*cwq_cpu() iterators for_each_*cwq_cpu() are similar to regular CPU iterators except that it also considers the pseudo CPU number used for unbound workqueues. Explain them. Signed-off-by: Tejun Heo Cc: Andrew Morton --- kernel/workqueue.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e5cb7fa..1105c47 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -271,6 +271,19 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2); } +/* + * CPU iterators + * + * An extra gcwq is defined for an invalid cpu number + * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any + * specific CPU. The following iterators are similar to + * for_each_*_cpu() iterators but also considers the unbound gcwq. + * + * for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND + * for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND + * for_each_cwq_cpu() : possible CPUs for bound workqueues, + * WORK_CPU_UNBOUND for unbound workqueues + */ #define for_each_gcwq_cpu(cpu) \ for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \ (cpu) < WORK_CPU_NONE; \ -- cgit v1.1 From 6ee0578b4daaea01c96b172c6aacca43fd9807a6 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Fri, 30 Jul 2010 14:57:37 -0700 Subject: workqueue: mark init_workqueues() as early_initcall() Mark init_workqueues() as early_initcall() and thus it will be initialized before smp bringup. init_workqueues() registers for the hotcpu notifier and thus it should cope with the processors that are brought online after the workqueues are initialized. x86 smp bringup code uses workqueues and uses a workaround for the cold boot process (as the workqueues are initialized post smp_init()). 
Marking init_workqueues() as early_initcall() will pave the way for cleaning up this code. Signed-off-by: Suresh Siddha Signed-off-by: Tejun Heo Cc: Oleg Nesterov Cc: Andrew Morton --- kernel/workqueue.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1105c47..e2eb351 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3507,7 +3507,7 @@ out_unlock: } #endif /* CONFIG_FREEZER */ -void __init init_workqueues(void) +static int __init init_workqueues(void) { unsigned int cpu; int i; @@ -3559,4 +3559,6 @@ void __init init_workqueues(void) system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq); + return 0; } +early_initcall(init_workqueues); -- cgit v1.1 From 669336e4cf3e1cb95800f3f5924558a76d723c21 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 20 Jul 2010 17:29:54 +0200 Subject: perf: Use tracepoint_synchronize_unregister() to flush any pending tracepoint call We use synchronize_sched() to ensure a tracepoint won't be called while/after we release the perf buffers it references. But the tracepoint API has its own API for that: tracepoint_synchronize_unregister(). Use it instead as it's self-explanatory and eases maintainance. Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mathieu Desnoyers Cc: Steven Rostedt Cc: Li Zefan --- kernel/trace/trace_event_perf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 2375165..000e6e8 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -131,10 +131,10 @@ void perf_trace_destroy(struct perf_event *p_event) tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER); /* - * Ensure our callback won't be called anymore. See - * tracepoint_probe_unregister() and __DO_TRACE(). + * Ensure our callback won't be called anymore. The buffers + * will be freed after that. */ - synchronize_sched(); + tracepoint_synchronize_unregister(); free_percpu(tp_event->perf_events); tp_event->perf_events = NULL; -- cgit v1.1 From af5ab277ded04bd9bc6b048c5a2f0e7d70ef0867 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Tue, 27 Jul 2010 21:02:10 -0700 Subject: clockevents: Remove the per cpu tick skew Historically, Linux has tried to make the regular timer tick on the various CPUs not happen at the same time, to avoid contention on xtime_lock. Nowadays, with the tickless kernel, this contention no longer happens since time keeping and updating are done differently. In addition, this skew is actually hurting power consumption in a measurable way on many-core systems. 
Signed-off-by: Arjan van de Ven LKML-Reference: <20100727210210.58d3118c@infradead.org> Signed-off-by: Thomas Gleixner --- kernel/time/tick-sched.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 813993b..74644cc 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -780,7 +780,6 @@ void tick_setup_sched_timer(void) { struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); ktime_t now = ktime_get(); - u64 offset; /* * Emulate tick processing via per-CPU hrtimers: @@ -790,10 +789,6 @@ void tick_setup_sched_timer(void) /* Get the next period (per cpu) */ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); - offset = ktime_to_ns(tick_period) >> 1; - do_div(offset, num_possible_cpus()); - offset *= smp_processor_id(); - hrtimer_add_expires_ns(&ts->sched_timer, offset); for (;;) { hrtimer_forward(&ts->sched_timer, now, tick_period); -- cgit v1.1 From 8cadd2831bf3abc94f4530e7fdbab7bb39b6b27d Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Mon, 10 May 2010 14:26:20 -0700 Subject: timer: add on-stack deferrable timer interfaces In some cases (for instance with kernel threads) it may be desireable to use on-stack deferrable timers to get their power saving benefits. Add interfaces to support this for the IPS driver. Signed-off-by: Jesse Barnes Signed-off-by: Matthew Garrett --- kernel/timer.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index ee305c8..efde11e 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -577,6 +577,19 @@ static void __init_timer(struct timer_list *timer, lockdep_init_map(&timer->lockdep_map, name, key, 0); } +void setup_deferrable_timer_on_stack_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key, + void (*function)(unsigned long), + unsigned long data) +{ + timer->function = function; + timer->data = data; + init_timer_on_stack_key(timer, name, key); + timer_set_deferrable(timer); +} +EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key); + /** * init_timer_key - initialize a timer * @timer: the timer to be initialized -- cgit v1.1 From e1b004c3ef9c59db5f013528628b51c8653155ec Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 4 Aug 2010 10:53:00 +0200 Subject: Revert "timer: Added usleep[_range] timer" This reverts commit 22b8f15c2f7130bb0386f548428df2ffd4e81903 to merge an advanced version. 
Signed-off-by: Thomas Gleixner --- kernel/timer.c | 22 ---------------------- 1 file changed, 22 deletions(-) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index f110f24..ce98685 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1755,25 +1755,3 @@ unsigned long msleep_interruptible(unsigned int msecs) } EXPORT_SYMBOL(msleep_interruptible); - -static int __sched do_usleep_range(unsigned long min, unsigned long max) -{ - ktime_t kmin; - unsigned long delta; - - kmin = ktime_set(0, min * NSEC_PER_USEC); - delta = max - min; - return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL); -} - -/** - * usleep_range - Drop in replacement for udelay where wakeup is flexible - * @min: Minimum time in usecs to sleep - * @max: Maximum time in usecs to sleep - */ -void usleep_range(unsigned long min, unsigned long max) -{ - __set_current_state(TASK_UNINTERRUPTIBLE); - do_usleep_range(min, max); -} -EXPORT_SYMBOL(usleep_range); -- cgit v1.1 From 5e7f5a178bba45c5aca3448fddecabd4e28f1f6b Mon Sep 17 00:00:00 2001 From: Patrick Pannuto Date: Mon, 2 Aug 2010 15:01:04 -0700 Subject: timer: Added usleep_range timer usleep_range is a finer precision implementations of msleep and is designed to be a drop-in replacement for udelay where a precise sleep / busy-wait is unnecessary. Since an easy interface to hrtimers could lead to an undesired proliferation of interrupts, we provide only a "range" API, forcing the caller to think about an acceptable tolerance on both ends and hopefully avoiding introducing another interrupt. INTRO As discussed here ( http://lkml.org/lkml/2007/8/3/250 ), msleep(1) is not precise enough for many drivers (yes, sleep precision is an unfair notion, but consistently sleeping for ~an order of magnitude greater than requested is worth fixing). This patch adds a usleep API so that udelay does not have to be used. Obviously not every udelay can be replaced (those in atomic contexts or being used for simple bitbanging come to mind), but there are many, many examples of mydriver_write(...) /* Wait for hardware to latch */ udelay(100) in various drivers where a busy-wait loop is neither beneficial nor necessary, but msleep simply does not provide enough precision and people are using a busy-wait loop instead. CONCERNS FROM THE RFC Why is udelay a problem / necessary? Most callers of udelay are in device/ driver initialization code, which is serial... As I see it, there is only benefit to sleeping over a delay; the notion of "refactoring" areas that use udelay was presented, but I see usleep as the refactoring. Consider i2c, if the bus is busy, you need to wait a bit (say 100us) before trying again, your current options are: * udelay(100) * msleep(1) <-- As noted above, actually as high as ~20ms on some platforms, so not really an option * Manually set up an hrtimer to try again in 100us (which is what usleep does anyway...) People choose the udelay route because it is EASY; we need to provide a better easy route. Device / driver / boot code is *currently* serial, but every few months someone makes noise about parallelizing boot, and IMHO, a little forward-thinking now is one less thing to worry about if/when that ever happens udelay's could be preempted Sure, but if udelay plans on looping 1000 times, and it gets preempted on loop 200, whenever it's scheduled again, it is going to do the next 800 loops. Is the interruptible case needed? Probably not, but I see usleep as a very logical parallel to msleep, so it made sense to include the "full" API. 
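To make the intended replacement concrete, the latch/retry examples above turn into something like this (illustrative driver snippet; mydriver_write() and its arguments are hypothetical):

    mydriver_write(reg, val);   /* hypothetical bus/MMIO write from the example */
    /* Wait for hardware to latch: at least 100us, but a little slack is
     * acceptable, so give the scheduler a window instead of busy-waiting. */
    usleep_range(100, 200);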
Processors are getting faster (albeit not as quickly as they are becoming more parallel), so if someone wanted to be interruptible for a few usecs, why not let them? If this is a contentious point, I'm happy to remove it. OTHER THOUGHTS I believe there is also value in exposing the usleep_range option; it gives the scheduler a lot more flexibility and allows the programmer to express his intent much more clearly; it's something I would hope future driver writers will take advantage of. To get the results in the NUMBERS section below, I literally s/udelay/usleep the kernel tree; I had to go in and undo the changes to the USB drivers, but everything else booted successfully; I find that extremely telling in and of itself -- many people are using a delay API where a sleep will suit them just fine. SOME ATTEMPTS AT NUMBERS It turns out that calculating quantifiable benefit on this is challenging, so instead I will simply present the current state of things, and I hope this to be sufficient: How many udelay calls are there in 2.6.35-rc5? udealy(ARG) >= | COUNT 1000 | 319 500 | 414 100 | 1146 20 | 1832 I am working on Android, so that is my focus for this. The following table is a modified usleep that simply printk's the amount of time requested to sleep; these tests were run on a kernel with udelay >= 20 --> usleep "boot" is power-on to lock screen "power collapse" is when the power button is pushed and the device suspends "resume" is when the power button is pushed and the lock screen is displayed (no touchscreen events or anything, just turning on the display) "use device" is from the unlock swipe to clicking around a bit; there is no sd card in this phone, so fail loading music, video, camera ACTION | TOTAL NUMBER OF USLEEP CALLS | NET TIME (us) boot | 22 | 1250 power-collapse | 9 | 1200 resume | 5 | 500 use device | 59 | 7700 The most interesting category to me is the "use device" field; 7700us of busy-wait time that could be put towards better responsiveness, or at the least less power usage. Signed-off-by: Patrick Pannuto Cc: apw@canonical.com Cc: corbet@lwn.net Cc: arjan@linux.intel.com Cc: Randy Dunlap Cc: Andrew Morton Signed-off-by: Thomas Gleixner --- kernel/timer.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index ce98685..723a62e 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1755,3 +1755,25 @@ unsigned long msleep_interruptible(unsigned int msecs) } EXPORT_SYMBOL(msleep_interruptible); + +static int __sched do_usleep_range(unsigned long min, unsigned long max) +{ + ktime_t kmin; + unsigned long delta; + + kmin = ktime_set(0, min * NSEC_PER_USEC); + delta = (max - min) * NSEC_PER_USEC; + return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL); +} + +/** + * usleep_range - Drop in replacement for udelay where wakeup is flexible + * @min: Minimum time in usecs to sleep + * @max: Maximum time in usecs to sleep + */ +void usleep_range(unsigned long min, unsigned long max) +{ + __set_current_state(TASK_UNINTERRUPTIBLE); + do_usleep_range(min, max); +} +EXPORT_SYMBOL(usleep_range); -- cgit v1.1 From 9da79ab83ee33ddc1fdd0858fd3d70925a1bde99 Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Wed, 30 Jun 2010 14:15:48 +0530 Subject: tracing/kprobes: unregister_trace_probe needs to be called under mutex Comment in unregister_trace_probe() says probe_lock will be held when it gets called. However there is a case where it might called without the probe_lock being held. 
Also since we are traversing the probe_list and deleting an element from the probe_list, probe_lock should be held. This was first pointed in uprobes traceevent review by Frederic Weisbecker here. (http://lkml.org/lkml/2010/5/12/106) Cc: Ingo Molnar Cc: Masami Hiramatsu Acked-by: Masami Hiramatsu Acked-by: Steven Rostedt LKML-Reference: <20100630084548.GA10325@linux.vnet.ibm.com> Signed-off-by: Srikar Dronamraju Signed-off-by: Arnaldo Carvalho de Melo --- kernel/trace/trace_kprobe.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1b79d1c..8b27c98 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -925,14 +925,17 @@ static int create_trace_probe(int argc, char **argv) pr_info("Delete command needs an event name.\n"); return -EINVAL; } + mutex_lock(&probe_lock); tp = find_probe_event(event, group); if (!tp) { + mutex_unlock(&probe_lock); pr_info("Event %s/%s doesn't exist.\n", group, event); return -ENOENT; } /* delete an event */ unregister_trace_probe(tp); free_trace_probe(tp); + mutex_unlock(&probe_lock); return 0; } -- cgit v1.1 From 694f690d27dadccc8cb9d90532e76593b61fe098 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 4 Aug 2010 16:59:14 +0100 Subject: CRED: Fix RCU warning due to previous patch fixing __task_cred()'s checks Commit 8f92054e7ca1 ("CRED: Fix __task_cred()'s lockdep check and banner comment") fixed the lockdep checks on __task_cred(). This has shown up a place in the signalling code where a lock should be held - namely that check_kill_permission() requires its callers to hold the RCU lock. Fix group_send_sig_info() to get the RCU read lock around its call to check_kill_permission(). Without this patch, the following warning can occur: =================================================== [ INFO: suspicious rcu_dereference_check() usage. ] --------------------------------------------------- kernel/signal.c:660 invoked rcu_dereference_check() without protection! ... Reported-by: Tetsuo Handa Signed-off-by: David Howells Acked-by: Oleg Nesterov Signed-off-by: Linus Torvalds --- kernel/signal.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 906ae5a1..bded651 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -637,7 +637,7 @@ static inline bool si_fromuser(const struct siginfo *info) /* * Bad permissions for sending the signal - * - the caller must hold at least the RCU read lock + * - the caller must hold the RCU read lock */ static int check_kill_permission(int sig, struct siginfo *info, struct task_struct *t) @@ -1127,11 +1127,14 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long /* * send signal info to all the members of a group - * - the caller must hold the RCU read lock at least */ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { - int ret = check_kill_permission(sig, info, p); + int ret; + + rcu_read_lock(); + ret = check_kill_permission(sig, info, p); + rcu_read_unlock(); if (!ret && sig) ret = do_send_sig_info(sig, info, p, true); -- cgit v1.1 From 2409e74278b7fb917d39ef6d3c16223c04a386f2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 5 Aug 2010 12:59:00 -0600 Subject: module: module_unload_init() cleanup No need to clear mod->refptr in module_unload_init(), since alloc_percpu() already clears allocated chunks. 
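Put differently, the per-cpu allocator returns zeroed memory, so the loop removed below only rewrites zeroes that are already there; roughly:

    mod->refptr = alloc_percpu(struct module_ref);   /* chunk comes back zeroed */

    /* redundant: incs and decs are already 0 on every cpu */
    for_each_possible_cpu(cpu) {
        per_cpu_ptr(mod->refptr, cpu)->incs = 0;
        per_cpu_ptr(mod->refptr, cpu)->decs = 0;
    }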
Signed-off-by: Eric Dumazet Signed-off-by: Rusty Russell (removed unused var) --- kernel/module.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 6c56282..404e722 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -526,14 +526,8 @@ EXPORT_TRACEPOINT_SYMBOL(module_get); /* Init the unload section of the module. */ static void module_unload_init(struct module *mod) { - int cpu; - INIT_LIST_HEAD(&mod->source_list); INIT_LIST_HEAD(&mod->target_list); - for_each_possible_cpu(cpu) { - per_cpu_ptr(mod->refptr, cpu)->incs = 0; - per_cpu_ptr(mod->refptr, cpu)->decs = 0; - } /* Hold reference count during initialization. */ __this_cpu_write(mod->refptr->incs, 1); -- cgit v1.1 From f91a13bb99b73961d4e2743a6ff296ac553abc4f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 5 Aug 2010 12:59:02 -0600 Subject: module: refactor load_module I'd start from the trivial stuff. There's a fair amount of straight-line code that just makes the function hard to read just because you have to page up and down so far. Some of it is trivial to just create a helper function for. Signed-off-by: Linus Torvalds Signed-off-by: Rusty Russell --- kernel/module.c | 130 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 69 insertions(+), 61 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 404e722..12905ed 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2107,6 +2107,71 @@ static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, } #endif +static void find_module_sections(struct module *mod, Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, const char *secstrings) +{ + mod->kp = section_objs(hdr, sechdrs, secstrings, "__param", + sizeof(*mod->kp), &mod->num_kp); + mod->syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab", + sizeof(*mod->syms), &mod->num_syms); + mod->crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab"); + mod->gpl_syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab_gpl", + sizeof(*mod->gpl_syms), + &mod->num_gpl_syms); + mod->gpl_crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab_gpl"); + mod->gpl_future_syms = section_objs(hdr, sechdrs, secstrings, + "__ksymtab_gpl_future", + sizeof(*mod->gpl_future_syms), + &mod->num_gpl_future_syms); + mod->gpl_future_crcs = section_addr(hdr, sechdrs, secstrings, + "__kcrctab_gpl_future"); + +#ifdef CONFIG_UNUSED_SYMBOLS + mod->unused_syms = section_objs(hdr, sechdrs, secstrings, + "__ksymtab_unused", + sizeof(*mod->unused_syms), + &mod->num_unused_syms); + mod->unused_crcs = section_addr(hdr, sechdrs, secstrings, + "__kcrctab_unused"); + mod->unused_gpl_syms = section_objs(hdr, sechdrs, secstrings, + "__ksymtab_unused_gpl", + sizeof(*mod->unused_gpl_syms), + &mod->num_unused_gpl_syms); + mod->unused_gpl_crcs = section_addr(hdr, sechdrs, secstrings, + "__kcrctab_unused_gpl"); +#endif +#ifdef CONFIG_CONSTRUCTORS + mod->ctors = section_objs(hdr, sechdrs, secstrings, ".ctors", + sizeof(*mod->ctors), &mod->num_ctors); +#endif + +#ifdef CONFIG_TRACEPOINTS + mod->tracepoints = section_objs(hdr, sechdrs, secstrings, + "__tracepoints", + sizeof(*mod->tracepoints), + &mod->num_tracepoints); +#endif +#ifdef CONFIG_EVENT_TRACING + mod->trace_events = section_objs(hdr, sechdrs, secstrings, + "_ftrace_events", + sizeof(*mod->trace_events), + &mod->num_trace_events); + /* + * This section contains pointers to allocated objects in the trace + * code and not scanning it leads to false positives. 
+ */ + kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) * + mod->num_trace_events, GFP_KERNEL); +#endif +#ifdef CONFIG_FTRACE_MCOUNT_RECORD + /* sechdrs[0].sh_size is always zero */ + mod->ftrace_callsites = section_objs(hdr, sechdrs, secstrings, + "__mcount_loc", + sizeof(*mod->ftrace_callsites), + &mod->num_ftrace_callsites); +#endif +} + /* Allocate and load the module: note that size of section 0 is always zero, and we rely on this for optional sections. */ static noinline struct module *load_module(void __user *umod, @@ -2147,7 +2212,7 @@ static noinline struct module *load_module(void __user *umod, } /* Sanity checks against insmoding binaries or wrong arch, - weird elf version */ + weird elf version */ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0 || hdr->e_type != ET_REL || !elf_check_arch(hdr) @@ -2326,7 +2391,8 @@ static noinline struct module *load_module(void __user *umod, sechdrs[i].sh_size); /* Update sh_addr to point to copy in image. */ sechdrs[i].sh_addr = (unsigned long)dest; - DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name); + DEBUGP("\t0x%lx %s\n", + sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name); } /* Module has been moved. */ mod = (void *)sechdrs[modindex].sh_addr; @@ -2368,66 +2434,8 @@ static noinline struct module *load_module(void __user *umod, /* Now we've got everything in the final locations, we can * find optional sections. */ - mod->kp = section_objs(hdr, sechdrs, secstrings, "__param", - sizeof(*mod->kp), &mod->num_kp); - mod->syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab", - sizeof(*mod->syms), &mod->num_syms); - mod->crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab"); - mod->gpl_syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab_gpl", - sizeof(*mod->gpl_syms), - &mod->num_gpl_syms); - mod->gpl_crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab_gpl"); - mod->gpl_future_syms = section_objs(hdr, sechdrs, secstrings, - "__ksymtab_gpl_future", - sizeof(*mod->gpl_future_syms), - &mod->num_gpl_future_syms); - mod->gpl_future_crcs = section_addr(hdr, sechdrs, secstrings, - "__kcrctab_gpl_future"); + find_module_sections(mod, hdr, sechdrs, secstrings); -#ifdef CONFIG_UNUSED_SYMBOLS - mod->unused_syms = section_objs(hdr, sechdrs, secstrings, - "__ksymtab_unused", - sizeof(*mod->unused_syms), - &mod->num_unused_syms); - mod->unused_crcs = section_addr(hdr, sechdrs, secstrings, - "__kcrctab_unused"); - mod->unused_gpl_syms = section_objs(hdr, sechdrs, secstrings, - "__ksymtab_unused_gpl", - sizeof(*mod->unused_gpl_syms), - &mod->num_unused_gpl_syms); - mod->unused_gpl_crcs = section_addr(hdr, sechdrs, secstrings, - "__kcrctab_unused_gpl"); -#endif -#ifdef CONFIG_CONSTRUCTORS - mod->ctors = section_objs(hdr, sechdrs, secstrings, ".ctors", - sizeof(*mod->ctors), &mod->num_ctors); -#endif - -#ifdef CONFIG_TRACEPOINTS - mod->tracepoints = section_objs(hdr, sechdrs, secstrings, - "__tracepoints", - sizeof(*mod->tracepoints), - &mod->num_tracepoints); -#endif -#ifdef CONFIG_EVENT_TRACING - mod->trace_events = section_objs(hdr, sechdrs, secstrings, - "_ftrace_events", - sizeof(*mod->trace_events), - &mod->num_trace_events); - /* - * This section contains pointers to allocated objects in the trace - * code and not scanning it leads to false positives. 
- */ - kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) * - mod->num_trace_events, GFP_KERNEL); -#endif -#ifdef CONFIG_FTRACE_MCOUNT_RECORD - /* sechdrs[0].sh_size is always zero */ - mod->ftrace_callsites = section_objs(hdr, sechdrs, secstrings, - "__mcount_loc", - sizeof(*mod->ftrace_callsites), - &mod->num_ftrace_callsites); -#endif #ifdef CONFIG_MODVERSIONS if ((mod->num_syms && !mod->crcs) || (mod->num_gpl_syms && !mod->gpl_crcs) -- cgit v1.1 From 65b8a9b4d5525348e55cf63a43517610ee8e0970 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 5 Aug 2010 12:59:02 -0600 Subject: module: refactor load_module part 2 Here's a second one. It's slightly less trivial - since we now have error cases - and equally untested so it may well be totally broken. But it also cleans up a bit more, and avoids one of the goto targets, because the "move_module()" helper now does both allocations or none. Signed-off-by: Linus Torvalds Signed-off-by: Rusty Russell --- kernel/module.c | 129 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 72 insertions(+), 57 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 12905ed..19ddcb0 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2082,7 +2082,8 @@ static void *module_alloc_update_bounds(unsigned long size) #ifdef CONFIG_DEBUG_KMEMLEAK static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, - Elf_Shdr *sechdrs, char *secstrings) + const Elf_Shdr *sechdrs, + const char *secstrings) { unsigned int i; @@ -2102,7 +2103,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, } #else static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, - Elf_Shdr *sechdrs, char *secstrings) + Elf_Shdr *sechdrs, + const char *secstrings) { } #endif @@ -2172,6 +2174,70 @@ static void find_module_sections(struct module *mod, Elf_Ehdr *hdr, #endif } +static struct module *move_module(struct module *mod, + Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + const char *secstrings, unsigned modindex) +{ + int i; + void *ptr; + + /* Do the allocs. */ + ptr = module_alloc_update_bounds(mod->core_size); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. Just mark it as not being a + * leak. + */ + kmemleak_not_leak(ptr); + if (!ptr) + return ERR_PTR(-ENOMEM); + + memset(ptr, 0, mod->core_size); + mod->module_core = ptr; + + ptr = module_alloc_update_bounds(mod->init_size); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. This block doesn't need to be + * scanned as it contains data and code that will be freed + * after the module is initialized. + */ + kmemleak_ignore(ptr); + if (!ptr && mod->init_size) { + module_free(mod, mod->module_core); + return ERR_PTR(-ENOMEM); + } + memset(ptr, 0, mod->init_size); + mod->module_init = ptr; + + /* Transfer each section which specifies SHF_ALLOC */ + DEBUGP("final section addresses:\n"); + for (i = 0; i < hdr->e_shnum; i++) { + void *dest; + + if (!(sechdrs[i].sh_flags & SHF_ALLOC)) + continue; + + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) + dest = mod->module_init + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); + else + dest = mod->module_core + sechdrs[i].sh_entsize; + + if (sechdrs[i].sh_type != SHT_NOBITS) + memcpy(dest, (void *)sechdrs[i].sh_addr, + sechdrs[i].sh_size); + /* Update sh_addr to point to copy in image. 
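Taken together with parts 1 and 2, load_module() is converging on a sequence of well-named helpers; an outline of the resulting flow (helper names from the hunks in this series, locals and error unwinding elided, so this is a sketch rather than the final function):

    /*
     * load_module() after refactor parts 1-3 (parts 4 and 5 continue below):
     *
     *   copy_and_check(&hdr, umod, len)               copy in + ELF sanity checks
     *   ... find section/string indices ...
     *   check_modinfo(mod, sechdrs, infoindex, versindex)      vermagic / staging taint
     *   ... per-cpu area, section layout ...
     *   move_module(mod, hdr, sechdrs, secstrings, modindex)   allocate core/init, copy sections
     *   module_unload_init(mod)                       refcount and unload-list setup
     *   find_module_sections(mod, hdr, sechdrs, secstrings)    locate optional sections
     *   ... relocations, symbol table, sysfs, add to modules list ...
     */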
*/ + sechdrs[i].sh_addr = (unsigned long)dest; + DEBUGP("\t0x%lx %s\n", + sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name); + } + /* Module has been moved. */ + mod = (void *)sechdrs[modindex].sh_addr; + kmemleak_load_module(mod, hdr, sechdrs, secstrings); + return mod; +} + /* Allocate and load the module: note that size of section 0 is always zero, and we rely on this for optional sections. */ static noinline struct module *load_module(void __user *umod, @@ -2188,7 +2254,6 @@ static noinline struct module *load_module(void __user *umod, unsigned int modindex, versindex, infoindex, pcpuindex; struct module *mod; long err = 0; - void *ptr = NULL; /* Stops spurious gcc warning */ unsigned long symoffs, stroffs, *strmap; void __percpu *percpu; struct _ddebug *debug = NULL; @@ -2342,61 +2407,12 @@ static noinline struct module *load_module(void __user *umod, symoffs = layout_symtab(mod, sechdrs, symindex, strindex, hdr, secstrings, &stroffs, strmap); - /* Do the allocs. */ - ptr = module_alloc_update_bounds(mod->core_size); - /* - * The pointer to this block is stored in the module structure - * which is inside the block. Just mark it as not being a - * leak. - */ - kmemleak_not_leak(ptr); - if (!ptr) { - err = -ENOMEM; + /* Allocate and move to the final place */ + mod = move_module(mod, hdr, sechdrs, secstrings, modindex); + if (IS_ERR(mod)) { + err = PTR_ERR(mod); goto free_percpu; } - memset(ptr, 0, mod->core_size); - mod->module_core = ptr; - - ptr = module_alloc_update_bounds(mod->init_size); - /* - * The pointer to this block is stored in the module structure - * which is inside the block. This block doesn't need to be - * scanned as it contains data and code that will be freed - * after the module is initialized. - */ - kmemleak_ignore(ptr); - if (!ptr && mod->init_size) { - err = -ENOMEM; - goto free_core; - } - memset(ptr, 0, mod->init_size); - mod->module_init = ptr; - - /* Transfer each section which specifies SHF_ALLOC */ - DEBUGP("final section addresses:\n"); - for (i = 0; i < hdr->e_shnum; i++) { - void *dest; - - if (!(sechdrs[i].sh_flags & SHF_ALLOC)) - continue; - - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) - dest = mod->module_init - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); - else - dest = mod->module_core + sechdrs[i].sh_entsize; - - if (sechdrs[i].sh_type != SHT_NOBITS) - memcpy(dest, (void *)sechdrs[i].sh_addr, - sechdrs[i].sh_size); - /* Update sh_addr to point to copy in image. */ - sechdrs[i].sh_addr = (unsigned long)dest; - DEBUGP("\t0x%lx %s\n", - sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name); - } - /* Module has been moved. */ - mod = (void *)sechdrs[modindex].sh_addr; - kmemleak_load_module(mod, hdr, sechdrs, secstrings); #if defined(CONFIG_MODULE_UNLOAD) mod->refptr = alloc_percpu(struct module_ref); @@ -2580,7 +2596,6 @@ static noinline struct module *load_module(void __user *umod, free_init: #endif module_free(mod, mod->module_init); - free_core: module_free(mod, mod->module_core); /* mod will be freed with core. Don't access it beyond this line! */ free_percpu: -- cgit v1.1 From 40dd2560ec2df21036db9ec8b971f92d98fd1969 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:03 -0600 Subject: module: refactor load_module part 3 Extract out the allocation and copying in from userspace, and the first set of modinfo checks. 
Signed-off-by: Rusty Russell --- kernel/module.c | 122 ++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 75 insertions(+), 47 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 19ddcb0..d8faf35 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1803,7 +1803,7 @@ static char *next_string(char *string, unsigned long *secsize) return string; } -static char *get_modinfo(Elf_Shdr *sechdrs, +static char *get_modinfo(const Elf_Shdr *sechdrs, unsigned int info, const char *tag) { @@ -2109,6 +2109,73 @@ static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, } #endif +static int copy_and_check(Elf_Ehdr **hdrp, + const void __user *umod, unsigned long len) +{ + int err; + Elf_Ehdr *hdr; + + if (len < sizeof(*hdr)) + return -ENOEXEC; + + /* Suck in entire file: we'll want most of it. */ + /* vmalloc barfs on "unusual" numbers. Check here */ + if (len > 64 * 1024 * 1024 || (hdr = *hdrp = vmalloc(len)) == NULL) + return -ENOMEM; + + if (copy_from_user(hdr, umod, len) != 0) { + err = -EFAULT; + goto free_hdr; + } + + /* Sanity checks against insmoding binaries or wrong arch, + weird elf version */ + if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0 + || hdr->e_type != ET_REL + || !elf_check_arch(hdr) + || hdr->e_shentsize != sizeof(Elf_Shdr)) { + err = -ENOEXEC; + goto free_hdr; + } + + if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { + err = -ENOEXEC; + goto free_hdr; + } + return 0; + +free_hdr: + vfree(hdr); + return err; +} + +static int check_modinfo(struct module *mod, + const Elf_Shdr *sechdrs, + unsigned int infoindex, unsigned int versindex) +{ + const char *modmagic = get_modinfo(sechdrs, infoindex, "vermagic"); + int err; + + /* This is allowed: modprobe --force will invalidate it. */ + if (!modmagic) { + err = try_to_force_load(mod, "bad vermagic"); + if (err) + return err; + } else if (!same_magic(modmagic, vermagic, versindex)) { + printk(KERN_ERR "%s: version magic '%s' should be '%s'\n", + mod->name, modmagic, vermagic); + return -ENOEXEC; + } + + if (get_modinfo(sechdrs, infoindex, "staging")) { + add_taint_module(mod, TAINT_CRAP); + printk(KERN_WARNING "%s: module is from the staging directory," + " the quality is unknown, you have been warned.\n", + mod->name); + } + return 0; +} + static void find_module_sections(struct module *mod, Elf_Ehdr *hdr, Elf_Shdr *sechdrs, const char *secstrings) { @@ -2246,14 +2313,13 @@ static noinline struct module *load_module(void __user *umod, { Elf_Ehdr *hdr; Elf_Shdr *sechdrs; - char *secstrings, *args, *modmagic, *strtab = NULL; - char *staging; + char *secstrings, *args, *strtab = NULL; unsigned int i; unsigned int symindex = 0; unsigned int strindex = 0; unsigned int modindex, versindex, infoindex, pcpuindex; struct module *mod; - long err = 0; + long err; unsigned long symoffs, stroffs, *strmap; void __percpu *percpu; struct _ddebug *debug = NULL; @@ -2263,31 +2329,10 @@ static noinline struct module *load_module(void __user *umod, DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", umod, len, uargs); - if (len < sizeof(*hdr)) - return ERR_PTR(-ENOEXEC); - - /* Suck in entire file: we'll want most of it. */ - /* vmalloc barfs on "unusual" numbers. 
Check here */ - if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL) - return ERR_PTR(-ENOMEM); - - if (copy_from_user(hdr, umod, len) != 0) { - err = -EFAULT; - goto free_hdr; - } - - /* Sanity checks against insmoding binaries or wrong arch, - weird elf version */ - if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0 - || hdr->e_type != ET_REL - || !elf_check_arch(hdr) - || hdr->e_shentsize != sizeof(*sechdrs)) { - err = -ENOEXEC; - goto free_hdr; - } - if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) - goto truncated; + err = copy_and_check(&hdr, umod, len); + if (err) + return ERR_PTR(err); /* Convenience variables */ sechdrs = (void *)hdr + hdr->e_shoff; @@ -2347,26 +2392,9 @@ static noinline struct module *load_module(void __user *umod, goto free_hdr; } - modmagic = get_modinfo(sechdrs, infoindex, "vermagic"); - /* This is allowed: modprobe --force will invalidate it. */ - if (!modmagic) { - err = try_to_force_load(mod, "bad vermagic"); - if (err) - goto free_hdr; - } else if (!same_magic(modmagic, vermagic, versindex)) { - printk(KERN_ERR "%s: version magic '%s' should be '%s'\n", - mod->name, modmagic, vermagic); - err = -ENOEXEC; + err = check_modinfo(mod, sechdrs, infoindex, versindex); + if (err) goto free_hdr; - } - - staging = get_modinfo(sechdrs, infoindex, "staging"); - if (staging) { - add_taint_module(mod, TAINT_CRAP); - printk(KERN_WARNING "%s: module is from the staging directory," - " the quality is unknown, you have been warned.\n", - mod->name); - } /* Now copy in args */ args = strndup_user(uargs, ~0UL >> 1); -- cgit v1.1 From 9f85a4bbb1cf56f65b3d065a5f88204a757f2325 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:04 -0600 Subject: module: refactor load_module part 4 Allocate references inside module_unload_init(), clean up inside module_unload_free(). This version fixed to do allocation before __this_cpu_write, thanks to bug reports from linux-next from Dave Young and Stephen Rothwell . Signed-off-by: Rusty Russell --- kernel/module.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index d8faf35..241bc41 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -524,8 +524,12 @@ static char last_unloaded_module[MODULE_NAME_LEN+1]; EXPORT_TRACEPOINT_SYMBOL(module_get); /* Init the unload section of the module. */ -static void module_unload_init(struct module *mod) +static int module_unload_init(struct module *mod) { + mod->refptr = alloc_percpu(struct module_ref); + if (!mod->refptr) + return -ENOMEM; + INIT_LIST_HEAD(&mod->source_list); INIT_LIST_HEAD(&mod->target_list); @@ -533,6 +537,8 @@ static void module_unload_init(struct module *mod) __this_cpu_write(mod->refptr->incs, 1); /* Backwards compatibility macros put refcount during init. */ mod->waiter = current; + + return 0; } /* Does a already use b? 
*/ @@ -612,6 +618,8 @@ static void module_unload_free(struct module *mod) kfree(use); } mutex_unlock(&module_mutex); + + free_percpu(mod->refptr); } #ifdef CONFIG_MODULE_FORCE_UNLOAD @@ -885,8 +893,9 @@ int ref_module(struct module *a, struct module *b) } EXPORT_SYMBOL_GPL(ref_module); -static inline void module_unload_init(struct module *mod) +static inline int module_unload_init(struct module *mod) { + return 0; } #endif /* CONFIG_MODULE_UNLOAD */ @@ -1559,10 +1568,7 @@ static void free_module(struct module *mod) module_free(mod, mod->module_init); kfree(mod->args); percpu_modfree(mod); -#if defined(CONFIG_MODULE_UNLOAD) - if (mod->refptr) - free_percpu(mod->refptr); -#endif + /* Free lock-classes: */ lockdep_free_key_range(mod->module_core, mod->core_size); @@ -2442,15 +2448,10 @@ static noinline struct module *load_module(void __user *umod, goto free_percpu; } -#if defined(CONFIG_MODULE_UNLOAD) - mod->refptr = alloc_percpu(struct module_ref); - if (!mod->refptr) { - err = -ENOMEM; - goto free_init; - } -#endif /* Now we've moved module, initialize linked lists, etc. */ - module_unload_init(mod); + err = module_unload_init(mod); + if (err) + goto free_init; /* Set up license info based on the info section */ set_license(mod, get_modinfo(sechdrs, infoindex, "license")); @@ -2619,10 +2620,7 @@ static noinline struct module *load_module(void __user *umod, cleanup: free_modinfo(mod); module_unload_free(mod); -#if defined(CONFIG_MODULE_UNLOAD) - free_percpu(mod->refptr); free_init: -#endif module_free(mod, mod->module_init); module_free(mod, mod->module_core); /* mod will be freed with core. Don't access it beyond this line! */ -- cgit v1.1 From 22e268ebecc549f1b1907f114cb44d6044bdee3c Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:05 -0600 Subject: module: refactor load_module part 5 1) Extract out the relocation loop into apply_relocations 2) Extract license and version checks into check_module_license_and_versions 3) Extract icache flushing into flush_module_icache 4) Move __obsparm warning into find_module_sections 5) Move license setting into check_modinfo. Signed-off-by: Rusty Russell --- kernel/module.c | 182 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 106 insertions(+), 76 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 241bc41..b536db8 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1698,6 +1698,39 @@ static int simplify_symbols(Elf_Shdr *sechdrs, return ret; } +static int apply_relocations(struct module *mod, + Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, + unsigned int symindex, + unsigned int strindex) +{ + unsigned int i; + int err = 0; + + /* Now do relocations. */ + for (i = 1; i < hdr->e_shnum; i++) { + const char *strtab = (char *)sechdrs[strindex].sh_addr; + unsigned int info = sechdrs[i].sh_info; + + /* Not a valid relocation section? 
*/ + if (info >= hdr->e_shnum) + continue; + + /* Don't bother with non-allocated sections */ + if (!(sechdrs[info].sh_flags & SHF_ALLOC)) + continue; + + if (sechdrs[i].sh_type == SHT_REL) + err = apply_relocate(sechdrs, strtab, symindex, i, mod); + else if (sechdrs[i].sh_type == SHT_RELA) + err = apply_relocate_add(sechdrs, strtab, symindex, i, + mod); + if (err < 0) + break; + } + return err; +} + /* Additional bytes needed by arch in front of individual sections */ unsigned int __weak arch_mod_section_prepend(struct module *mod, unsigned int section) @@ -2179,6 +2212,10 @@ static int check_modinfo(struct module *mod, " the quality is unknown, you have been warned.\n", mod->name); } + + /* Set up license info based on the info section */ + set_license(mod, get_modinfo(sechdrs, infoindex, "license")); + return 0; } @@ -2245,6 +2282,10 @@ static void find_module_sections(struct module *mod, Elf_Ehdr *hdr, sizeof(*mod->ftrace_callsites), &mod->num_ftrace_callsites); #endif + + if (section_addr(hdr, sechdrs, secstrings, "__obsparm")) + printk(KERN_WARNING "%s: Ignoring obsolete parameters\n", + mod->name); } static struct module *move_module(struct module *mod, @@ -2311,6 +2352,60 @@ static struct module *move_module(struct module *mod, return mod; } +static int check_module_license_and_versions(struct module *mod, + Elf_Shdr *sechdrs) +{ + /* + * ndiswrapper is under GPL by itself, but loads proprietary modules. + * Don't use add_taint_module(), as it would prevent ndiswrapper from + * using GPL-only symbols it needs. + */ + if (strcmp(mod->name, "ndiswrapper") == 0) + add_taint(TAINT_PROPRIETARY_MODULE); + + /* driverloader was caught wrongly pretending to be under GPL */ + if (strcmp(mod->name, "driverloader") == 0) + add_taint_module(mod, TAINT_PROPRIETARY_MODULE); + +#ifdef CONFIG_MODVERSIONS + if ((mod->num_syms && !mod->crcs) + || (mod->num_gpl_syms && !mod->gpl_crcs) + || (mod->num_gpl_future_syms && !mod->gpl_future_crcs) +#ifdef CONFIG_UNUSED_SYMBOLS + || (mod->num_unused_syms && !mod->unused_crcs) + || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs) +#endif + ) { + return try_to_force_load(mod, + "no versions for exported symbols"); + } +#endif + return 0; +} + +static void flush_module_icache(const struct module *mod) +{ + mm_segment_t old_fs; + + /* flush the icache in correct context */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* + * Flush the instruction cache, since we've played with text. + * Do it before processing of module parameters, so the module + * can provide parameter accessor functions of its own. + */ + if (mod->module_init) + flush_icache_range((unsigned long)mod->module_init, + (unsigned long)mod->module_init + + mod->init_size); + flush_icache_range((unsigned long)mod->module_core, + (unsigned long)mod->module_core + mod->core_size); + + set_fs(old_fs); +} + /* Allocate and load the module: note that size of section 0 is always zero, and we rely on this for optional sections. 
*/ static noinline struct module *load_module(void __user *umod, @@ -2331,8 +2426,6 @@ static noinline struct module *load_module(void __user *umod, struct _ddebug *debug = NULL; unsigned int num_debug = 0; - mm_segment_t old_fs; - DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", umod, len, uargs); @@ -2453,20 +2546,13 @@ static noinline struct module *load_module(void __user *umod, if (err) goto free_init; - /* Set up license info based on the info section */ - set_license(mod, get_modinfo(sechdrs, infoindex, "license")); - - /* - * ndiswrapper is under GPL by itself, but loads proprietary modules. - * Don't use add_taint_module(), as it would prevent ndiswrapper from - * using GPL-only symbols it needs. - */ - if (strcmp(mod->name, "ndiswrapper") == 0) - add_taint(TAINT_PROPRIETARY_MODULE); + /* Now we've got everything in the final locations, we can + * find optional sections. */ + find_module_sections(mod, hdr, sechdrs, secstrings); - /* driverloader was caught wrongly pretending to be under GPL */ - if (strcmp(mod->name, "driverloader") == 0) - add_taint_module(mod, TAINT_PROPRIETARY_MODULE); + err = check_module_license_and_versions(mod, sechdrs); + if (err) + goto free_unload; /* Set up MODINFO_ATTR fields */ setup_modinfo(mod, sechdrs, infoindex); @@ -2477,47 +2563,9 @@ static noinline struct module *load_module(void __user *umod, if (err < 0) goto cleanup; - /* Now we've got everything in the final locations, we can - * find optional sections. */ - find_module_sections(mod, hdr, sechdrs, secstrings); - -#ifdef CONFIG_MODVERSIONS - if ((mod->num_syms && !mod->crcs) - || (mod->num_gpl_syms && !mod->gpl_crcs) - || (mod->num_gpl_future_syms && !mod->gpl_future_crcs) -#ifdef CONFIG_UNUSED_SYMBOLS - || (mod->num_unused_syms && !mod->unused_crcs) - || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs) -#endif - ) { - err = try_to_force_load(mod, - "no versions for exported symbols"); - if (err) - goto cleanup; - } -#endif - - /* Now do relocations. */ - for (i = 1; i < hdr->e_shnum; i++) { - const char *strtab = (char *)sechdrs[strindex].sh_addr; - unsigned int info = sechdrs[i].sh_info; - - /* Not a valid relocation section? */ - if (info >= hdr->e_shnum) - continue; - - /* Don't bother with non-allocated sections */ - if (!(sechdrs[info].sh_flags & SHF_ALLOC)) - continue; - - if (sechdrs[i].sh_type == SHT_REL) - err = apply_relocate(sechdrs, strtab, symindex, i,mod); - else if (sechdrs[i].sh_type == SHT_RELA) - err = apply_relocate_add(sechdrs, strtab, symindex, i, - mod); - if (err < 0) - goto cleanup; - } + err = apply_relocations(mod, hdr, sechdrs, symindex, strindex); + if (err < 0) + goto cleanup; /* Set up and sort exception table */ mod->extable = section_objs(hdr, sechdrs, secstrings, "__ex_table", @@ -2541,28 +2589,9 @@ static noinline struct module *load_module(void __user *umod, if (err < 0) goto cleanup; - /* flush the icache in correct context */ - old_fs = get_fs(); - set_fs(KERNEL_DS); - - /* - * Flush the instruction cache, since we've played with text. - * Do it before processing of module parameters, so the module - * can provide parameter accessor functions of its own. 
- */ - if (mod->module_init) - flush_icache_range((unsigned long)mod->module_init, - (unsigned long)mod->module_init - + mod->init_size); - flush_icache_range((unsigned long)mod->module_core, - (unsigned long)mod->module_core + mod->core_size); - - set_fs(old_fs); + flush_module_icache(mod); mod->args = args; - if (section_addr(hdr, sechdrs, secstrings, "__obsparm")) - printk(KERN_WARNING "%s: Ignoring obsolete parameters\n", - mod->name); /* Now sew it into the lists so we can get lockdep and oops * info during argument parsing. Noone should access us, since @@ -2619,6 +2648,7 @@ static noinline struct module *load_module(void __user *umod, module_arch_cleanup(mod); cleanup: free_modinfo(mod); + free_unload: module_unload_free(mod); free_init: module_free(mod, mod->module_init); -- cgit v1.1 From 44032e631691adf1f406843d5e54deb795973ff7 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 5 Aug 2010 12:59:05 -0600 Subject: module: reduce stack usage for each_symbol() And now that I'm looking at that call-chain (to see if it would make sense to use some other more specific lock - doesn't look like it: all the readers are using RCU and this is the only writer), I also give you this trivial one-liner. It changes each_symbol() to not put that constant array on the stack, resulting in changing movq $C.388.31095, %rsi #, tmp85 subq $376, %rsp #, movq %rdi, %rbx # fn, fn leaq -208(%rbp), %rdi #, tmp84 movq %rbx, %rdx # fn, rep movsl xorl %esi, %esi # leaq -208(%rbp), %rdi #, tmp87 movq %r12, %rcx # data, call each_symbol_in_section.clone.0 # into xorl %esi, %esi # subq $216, %rsp #, movq %rdi, %rbx # fn, fn movq $arr.31078, %rdi #, call each_symbol_in_section.clone.0 # which is not so much about being obviously shorter and simpler because we don't unnecessarily copy that constant array around onto the stack, but also about having a much smaller stack footprint (376 vs 216 bytes - see the update of 'rsp'). Signed-off-by: Linus Torvalds Signed-off-by: Rusty Russell --- kernel/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index b536db8..79545bd 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -227,7 +227,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, unsigned int symnum, void *data), void *data) { struct module *mod; - const struct symsearch arr[] = { + static const struct symsearch arr[] = { { __start___ksymtab, __stop___ksymtab, __start___kcrctab, NOT_GPL_ONLY, false }, { __start___ksymtab_gpl, __stop___ksymtab_gpl, -- cgit v1.1 From 3264d3f9dd532ed9c3eb9491619e3f485b72747f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 2 Jun 2010 11:01:06 -0700 Subject: module: add load_info Btw, here's a patch that _looks_ large, but it really pretty trivial, and sets things up so that it would be way easier to split off pieces of the module loading. The reason it looks large is that it creates a "module_info" structure that contains all the module state that we're building up while loading, instead of having individual variables for all the indices etc. So the patch ends up being large, because every "symindex" access instead becomes "info.index.sym" etc. That may be a few characters longer, but it then means that we can just pass a pointer to that "info" structure around. and let all the pieces fill it in very naturally. As an example of that, the patch also moves the initialization of all those convenience variables into a "setup_module_info()" function. 
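(A toy sketch of the shape being described, with made-up names and simplified fields — the real struct load_info and its setup function appear in the diff below. Once the loose index variables live in one structure, each helper can take a single pointer and fill in the part it discovered:

    #include <elf.h>

    /* Illustrative only: simplified stand-in for the loader state. */
    struct obj_info {
            Elf64_Ehdr *hdr;        /* temporary copy of the whole object */
            Elf64_Shdr *sechdrs;
            char *secstrings;
            struct {
                    unsigned int sym, str, vers, info;
            } index;
    };

    /* Derive the convenience pointers once; later stages just read them. */
    static void setup_obj_info(struct obj_info *info)
    {
            info->sechdrs = (Elf64_Shdr *)((char *)info->hdr + info->hdr->e_shoff);
            info->secstrings = (char *)info->hdr
                    + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
    }

End of sketch.)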
And at this point it really does become very natural to start to peel off some of the error labels and move them into the helper functions - now the "truncated" case is gone, and is handled inside that setup function instead. So maybe you don't like this approach, and it does make the variable accesses a bit longer, but I don't think unreadably so. And the patch really does look big and scary, but there really should be absolutely no semantic changes - most of it was a trivial and mindless rename. In fact, it was so mindless that I on purpose kept the existing helper functions looking like this: - err = check_modinfo(mod, sechdrs, infoindex, versindex); + err = check_modinfo(mod, info.sechdrs, info.index.info, info.index.vers); rather than changing them to just take the "info" pointer. IOW, a second phase (if you think the approach is ok) would change that calling convention to just do err = check_modinfo(mod, &info); (and same for "layout_sections()", "layout_symtabs()" etc.) Similarly, while right now it makes things _look_ bigger, with things like this: versindex = find_sec(hdr, sechdrs, secstrings, "__versions"); becoming info->index.vers = find_sec(info->hdr, info->sechdrs, info->secstrings, "__versions"); in the new "setup_module_info()" function, that's again just a result of it being a search-and-replace patch. By using the 'info' pointer, we could just change the 'find_sec()' interface so that it ends up being info->index.vers = find_sec(info, "__versions"); instead, and then we'd actually have a shorter and more readable line. So for a lot of those mindless variable name expansions there's would be room for separate cleanups. I didn't move quite everything in there - if we do this to layout_symtabs, for example, we'd want to move the percpu, symoffs, stroffs, *strmap variables to be fields in that module_info structure too. But that's a much smaller patch, I moved just the really core stuff that is currently being set up and used in various parts. But even in this rough form, it removes close to 70 lines from that function (but adds 22 lines overall, of course - the structure definition, the helper function declarations and call-sites etc etc). Signed-off-by: Linus Torvalds Signed-off-by: Rusty Russell --- kernel/module.c | 228 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 125 insertions(+), 103 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 79545bd..cb40a4e 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2148,8 +2148,17 @@ static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, } #endif -static int copy_and_check(Elf_Ehdr **hdrp, - const void __user *umod, unsigned long len) +struct load_info { + Elf_Ehdr *hdr; + unsigned long len; + Elf_Shdr *sechdrs; + char *secstrings, *args, *strtab; + struct { + unsigned int sym, str, mod, vers, info, pcpu; + } index; +}; + +static int copy_and_check(struct load_info *info, const void __user *umod, unsigned long len) { int err; Elf_Ehdr *hdr; @@ -2159,7 +2168,7 @@ static int copy_and_check(Elf_Ehdr **hdrp, /* Suck in entire file: we'll want most of it. */ /* vmalloc barfs on "unusual" numbers. 
Check here */ - if (len > 64 * 1024 * 1024 || (hdr = *hdrp = vmalloc(len)) == NULL) + if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL) return -ENOMEM; if (copy_from_user(hdr, umod, len) != 0) { @@ -2181,6 +2190,8 @@ static int copy_and_check(Elf_Ehdr **hdrp, err = -ENOEXEC; goto free_hdr; } + info->hdr = hdr; + info->len = len; return 0; free_hdr: @@ -2188,6 +2199,80 @@ free_hdr: return err; } +/* + * Set up our basic convenience variables (pointers to section headers, + * search for module section index etc), and do some basic section + * verification. + * + * Return the temporary module pointer (we'll replace it with the final + * one when we move the module sections around). + */ +static struct module *setup_load_info(struct load_info *info) +{ + unsigned int i; + struct module *mod; + + /* Set up the convenience variables */ + info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; + info->secstrings = (void *)info->hdr + info->sechdrs[info->hdr->e_shstrndx].sh_offset; + info->sechdrs[0].sh_addr = 0; + + for (i = 1; i < info->hdr->e_shnum; i++) { + if (info->sechdrs[i].sh_type != SHT_NOBITS + && info->len < info->sechdrs[i].sh_offset + info->sechdrs[i].sh_size) + goto truncated; + + /* Mark all sections sh_addr with their address in the + temporary image. */ + info->sechdrs[i].sh_addr = (size_t)info->hdr + info->sechdrs[i].sh_offset; + + /* Internal symbols and strings. */ + if (info->sechdrs[i].sh_type == SHT_SYMTAB) { + info->index.sym = i; + info->index.str = info->sechdrs[i].sh_link; + info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset; + } +#ifndef CONFIG_MODULE_UNLOAD + /* Don't load .exit sections */ + if (strstarts(info->secstrings+info->sechdrs[i].sh_name, ".exit")) + info->sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC; +#endif + } + + info->index.mod = find_sec(info->hdr, info->sechdrs, info->secstrings, + ".gnu.linkonce.this_module"); + if (!info->index.mod) { + printk(KERN_WARNING "No module found in object\n"); + return ERR_PTR(-ENOEXEC); + } + /* This is temporary: point mod into copy of data. */ + mod = (void *)info->sechdrs[info->index.mod].sh_addr; + + if (info->index.sym == 0) { + printk(KERN_WARNING "%s: module has no symbols (stripped?)\n", + mod->name); + return ERR_PTR(-ENOEXEC); + } + + info->index.vers = find_sec(info->hdr, info->sechdrs, info->secstrings, "__versions"); + info->index.info = find_sec(info->hdr, info->sechdrs, info->secstrings, ".modinfo"); + info->index.pcpu = find_pcpusec(info->hdr, info->sechdrs, info->secstrings); + + /* Don't keep modinfo and version sections. */ + info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; + info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; + + /* Check module struct version now, before we try to use module. 
*/ + if (!check_modstruct_version(info->sechdrs, info->index.vers, mod)) + return ERR_PTR(-ENOEXEC); + + return mod; + + truncated: + printk(KERN_ERR "Module len %lu truncated\n", info->len); + return ERR_PTR(-ENOEXEC); +} + static int check_modinfo(struct module *mod, const Elf_Shdr *sechdrs, unsigned int infoindex, unsigned int versindex) @@ -2412,13 +2497,7 @@ static noinline struct module *load_module(void __user *umod, unsigned long len, const char __user *uargs) { - Elf_Ehdr *hdr; - Elf_Shdr *sechdrs; - char *secstrings, *args, *strtab = NULL; - unsigned int i; - unsigned int symindex = 0; - unsigned int strindex = 0; - unsigned int modindex, versindex, infoindex, pcpuindex; + struct load_info info = { NULL, }; struct module *mod; long err; unsigned long symoffs, stroffs, *strmap; @@ -2429,80 +2508,28 @@ static noinline struct module *load_module(void __user *umod, DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", umod, len, uargs); - err = copy_and_check(&hdr, umod, len); + err = copy_and_check(&info, umod, len); if (err) return ERR_PTR(err); - /* Convenience variables */ - sechdrs = (void *)hdr + hdr->e_shoff; - secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - sechdrs[0].sh_addr = 0; - - for (i = 1; i < hdr->e_shnum; i++) { - if (sechdrs[i].sh_type != SHT_NOBITS - && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) - goto truncated; - - /* Mark all sections sh_addr with their address in the - temporary image. */ - sechdrs[i].sh_addr = (size_t)hdr + sechdrs[i].sh_offset; - - /* Internal symbols and strings. */ - if (sechdrs[i].sh_type == SHT_SYMTAB) { - symindex = i; - strindex = sechdrs[i].sh_link; - strtab = (char *)hdr + sechdrs[strindex].sh_offset; - } -#ifndef CONFIG_MODULE_UNLOAD - /* Don't load .exit sections */ - if (strstarts(secstrings+sechdrs[i].sh_name, ".exit")) - sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC; -#endif - } - - modindex = find_sec(hdr, sechdrs, secstrings, - ".gnu.linkonce.this_module"); - if (!modindex) { - printk(KERN_WARNING "No module found in object\n"); - err = -ENOEXEC; - goto free_hdr; - } - /* This is temporary: point mod into copy of data. */ - mod = (void *)sechdrs[modindex].sh_addr; - - if (symindex == 0) { - printk(KERN_WARNING "%s: module has no symbols (stripped?)\n", - mod->name); - err = -ENOEXEC; - goto free_hdr; - } - - versindex = find_sec(hdr, sechdrs, secstrings, "__versions"); - infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo"); - pcpuindex = find_pcpusec(hdr, sechdrs, secstrings); - - /* Don't keep modinfo and version sections. */ - sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC; - sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC; - - /* Check module struct version now, before we try to use module. 
*/ - if (!check_modstruct_version(sechdrs, versindex, mod)) { - err = -ENOEXEC; + mod = setup_load_info(&info); + if (IS_ERR(mod)) { + err = PTR_ERR(mod); goto free_hdr; } - err = check_modinfo(mod, sechdrs, infoindex, versindex); + err = check_modinfo(mod, info.sechdrs, info.index.info, info.index.vers); if (err) goto free_hdr; /* Now copy in args */ - args = strndup_user(uargs, ~0UL >> 1); - if (IS_ERR(args)) { - err = PTR_ERR(args); + info.args = strndup_user(uargs, ~0UL >> 1); + if (IS_ERR(info.args)) { + err = PTR_ERR(info.args); goto free_hdr; } - strmap = kzalloc(BITS_TO_LONGS(sechdrs[strindex].sh_size) + strmap = kzalloc(BITS_TO_LONGS(info.sechdrs[info.index.str].sh_size) * sizeof(long), GFP_KERNEL); if (!strmap) { err = -ENOMEM; @@ -2512,17 +2539,17 @@ static noinline struct module *load_module(void __user *umod, mod->state = MODULE_STATE_COMING; /* Allow arches to frob section contents and sizes. */ - err = module_frob_arch_sections(hdr, sechdrs, secstrings, mod); + err = module_frob_arch_sections(info.hdr, info.sechdrs, info.secstrings, mod); if (err < 0) goto free_mod; - if (pcpuindex) { + if (info.index.pcpu) { /* We have a special allocation for this section. */ - err = percpu_modalloc(mod, sechdrs[pcpuindex].sh_size, - sechdrs[pcpuindex].sh_addralign); + err = percpu_modalloc(mod, info.sechdrs[info.index.pcpu].sh_size, + info.sechdrs[info.index.pcpu].sh_addralign); if (err) goto free_mod; - sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC; + info.sechdrs[info.index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; } /* Keep this around for failure path. */ percpu = mod_percpu(mod); @@ -2530,12 +2557,12 @@ static noinline struct module *load_module(void __user *umod, /* Determine total sizes, and put offsets in sh_entsize. For now this is done generically; there doesn't appear to be any special cases for the architectures. */ - layout_sections(mod, hdr, sechdrs, secstrings); - symoffs = layout_symtab(mod, sechdrs, symindex, strindex, hdr, - secstrings, &stroffs, strmap); + layout_sections(mod, info.hdr, info.sechdrs, info.secstrings); + symoffs = layout_symtab(mod, info.sechdrs, info.index.sym, info.index.str, info.hdr, + info.secstrings, &stroffs, strmap); /* Allocate and move to the final place */ - mod = move_module(mod, hdr, sechdrs, secstrings, modindex); + mod = move_module(mod, info.hdr, info.sechdrs, info.secstrings, info.index.mod); if (IS_ERR(mod)) { err = PTR_ERR(mod); goto free_percpu; @@ -2548,50 +2575,50 @@ static noinline struct module *load_module(void __user *umod, /* Now we've got everything in the final locations, we can * find optional sections. */ - find_module_sections(mod, hdr, sechdrs, secstrings); + find_module_sections(mod, info.hdr, info.sechdrs, info.secstrings); - err = check_module_license_and_versions(mod, sechdrs); + err = check_module_license_and_versions(mod, info.sechdrs); if (err) goto free_unload; /* Set up MODINFO_ATTR fields */ - setup_modinfo(mod, sechdrs, infoindex); + setup_modinfo(mod, info.sechdrs, info.index.info); /* Fix up syms, so that st_value is a pointer to location. 
*/ - err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex, + err = simplify_symbols(info.sechdrs, info.index.sym, info.strtab, info.index.vers, info.index.pcpu, mod); if (err < 0) goto cleanup; - err = apply_relocations(mod, hdr, sechdrs, symindex, strindex); + err = apply_relocations(mod, info.hdr, info.sechdrs, info.index.sym, info.index.str); if (err < 0) goto cleanup; /* Set up and sort exception table */ - mod->extable = section_objs(hdr, sechdrs, secstrings, "__ex_table", + mod->extable = section_objs(info.hdr, info.sechdrs, info.secstrings, "__ex_table", sizeof(*mod->extable), &mod->num_exentries); sort_extable(mod->extable, mod->extable + mod->num_exentries); /* Finally, copy percpu area over. */ - percpu_modcopy(mod, (void *)sechdrs[pcpuindex].sh_addr, - sechdrs[pcpuindex].sh_size); + percpu_modcopy(mod, (void *)info.sechdrs[info.index.pcpu].sh_addr, + info.sechdrs[info.index.pcpu].sh_size); - add_kallsyms(mod, sechdrs, hdr->e_shnum, symindex, strindex, - symoffs, stroffs, secstrings, strmap); + add_kallsyms(mod, info.sechdrs, info.hdr->e_shnum, info.index.sym, info.index.str, + symoffs, stroffs, info.secstrings, strmap); kfree(strmap); strmap = NULL; if (!mod->taints) - debug = section_objs(hdr, sechdrs, secstrings, "__verbose", + debug = section_objs(info.hdr, info.sechdrs, info.secstrings, "__verbose", sizeof(*debug), &num_debug); - err = module_finalize(hdr, sechdrs, mod); + err = module_finalize(info.hdr, info.sechdrs, mod); if (err < 0) goto cleanup; flush_module_icache(mod); - mod->args = args; + mod->args = info.args; /* Now sew it into the lists so we can get lockdep and oops * info during argument parsing. Noone should access us, since @@ -2625,11 +2652,11 @@ static noinline struct module *load_module(void __user *umod, if (err < 0) goto unlink; - add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs); - add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs); + add_sect_attrs(mod, info.hdr->e_shnum, info.secstrings, info.sechdrs); + add_notes_attrs(mod, info.hdr->e_shnum, info.secstrings, info.sechdrs); /* Get rid of temporary copy */ - vfree(hdr); + vfree(info.hdr); trace_module_load(mod); @@ -2657,16 +2684,11 @@ static noinline struct module *load_module(void __user *umod, free_percpu: free_percpu(percpu); free_mod: - kfree(args); + kfree(info.args); kfree(strmap); free_hdr: - vfree(hdr); + vfree(info.hdr); return ERR_PTR(err); - - truncated: - printk(KERN_ERR "Module len %lu truncated\n", len); - err = -ENOEXEC; - goto free_hdr; } /* Call module constructors. */ -- cgit v1.1 From 8b5f61a795fe37be090b0fd18b6b7271db9298e0 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:06 -0600 Subject: module: refactor out section header rewriting Put all the "rewrite and check section headers" in one place. This adds another iteration over the sections, but it's far clearer. We iterate once for every find_section() so we already iterate over many times. Signed-off-by: Rusty Russell --- kernel/module.c | 70 ++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index cb40a4e..91f3ebe 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2158,6 +2158,7 @@ struct load_info { } index; }; +/* Sets info->hdr and info->len. 
*/ static int copy_and_check(struct load_info *info, const void __user *umod, unsigned long len) { int err; @@ -2199,6 +2200,39 @@ free_hdr: return err; } +static int rewrite_section_headers(struct load_info *info) +{ + unsigned int i; + + /* This should always be true, but let's be sure. */ + info->sechdrs[0].sh_addr = 0; + + for (i = 1; i < info->hdr->e_shnum; i++) { + Elf_Shdr *shdr = &info->sechdrs[i]; + if (shdr->sh_type != SHT_NOBITS + && info->len < shdr->sh_offset + shdr->sh_size) { + printk(KERN_ERR "Module len %lu truncated\n", + info->len); + return -ENOEXEC; + } + + /* Mark all sections sh_addr with their address in the + temporary image. */ + shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; + +#ifndef CONFIG_MODULE_UNLOAD + /* Don't load .exit sections */ + if (strstarts(info->secstrings+shdr->sh_name, ".exit")) + shdr->sh_flags &= ~(unsigned long)SHF_ALLOC; +#endif + /* Don't keep modinfo and version sections. */ + if (!strcmp(info->secstrings+shdr->sh_name, "__versions") + || !strcmp(info->secstrings+shdr->sh_name, ".modinfo")) + shdr->sh_flags &= ~(unsigned long)SHF_ALLOC; + } + return 0; +} + /* * Set up our basic convenience variables (pointers to section headers, * search for module section index etc), and do some basic section @@ -2210,33 +2244,27 @@ free_hdr: static struct module *setup_load_info(struct load_info *info) { unsigned int i; + int err; struct module *mod; /* Set up the convenience variables */ info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; - info->secstrings = (void *)info->hdr + info->sechdrs[info->hdr->e_shstrndx].sh_offset; - info->sechdrs[0].sh_addr = 0; - - for (i = 1; i < info->hdr->e_shnum; i++) { - if (info->sechdrs[i].sh_type != SHT_NOBITS - && info->len < info->sechdrs[i].sh_offset + info->sechdrs[i].sh_size) - goto truncated; + info->secstrings = (void *)info->hdr + + info->sechdrs[info->hdr->e_shstrndx].sh_offset; - /* Mark all sections sh_addr with their address in the - temporary image. */ - info->sechdrs[i].sh_addr = (size_t)info->hdr + info->sechdrs[i].sh_offset; + err = rewrite_section_headers(info); + if (err) + return ERR_PTR(err); - /* Internal symbols and strings. */ + /* Find internal symbols and strings. */ + for (i = 1; i < info->hdr->e_shnum; i++) { if (info->sechdrs[i].sh_type == SHT_SYMTAB) { info->index.sym = i; info->index.str = info->sechdrs[i].sh_link; - info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset; + info->strtab = (char *)info->hdr + + info->sechdrs[info->index.str].sh_offset; + break; } -#ifndef CONFIG_MODULE_UNLOAD - /* Don't load .exit sections */ - if (strstarts(info->secstrings+info->sechdrs[i].sh_name, ".exit")) - info->sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC; -#endif } info->index.mod = find_sec(info->hdr, info->sechdrs, info->secstrings, @@ -2258,19 +2286,11 @@ static struct module *setup_load_info(struct load_info *info) info->index.info = find_sec(info->hdr, info->sechdrs, info->secstrings, ".modinfo"); info->index.pcpu = find_pcpusec(info->hdr, info->sechdrs, info->secstrings); - /* Don't keep modinfo and version sections. */ - info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; - info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; - /* Check module struct version now, before we try to use module. 
*/ if (!check_modstruct_version(info->sechdrs, info->index.vers, mod)) return ERR_PTR(-ENOEXEC); return mod; - - truncated: - printk(KERN_ERR "Module len %lu truncated\n", info->len); - return ERR_PTR(-ENOEXEC); } static int check_modinfo(struct module *mod, -- cgit v1.1 From d6df72a06e067139d491da4a9d14d92ad39e7a50 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:07 -0600 Subject: module: refactor out section header rewriting: FIX modversions We can't do the find_sec after removing the SHF_ALLOC flags; it won't find the sections. Signed-off-by: Rusty Russell --- kernel/module.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 91f3ebe..ab07f67 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2225,11 +2225,13 @@ static int rewrite_section_headers(struct load_info *info) if (strstarts(info->secstrings+shdr->sh_name, ".exit")) shdr->sh_flags &= ~(unsigned long)SHF_ALLOC; #endif - /* Don't keep modinfo and version sections. */ - if (!strcmp(info->secstrings+shdr->sh_name, "__versions") - || !strcmp(info->secstrings+shdr->sh_name, ".modinfo")) - shdr->sh_flags &= ~(unsigned long)SHF_ALLOC; } + + /* Track but don't keep modinfo and version sections. */ + info->index.vers = find_sec(info->hdr, info->sechdrs, info->secstrings, "__versions"); + info->index.info = find_sec(info->hdr, info->sechdrs, info->secstrings, ".modinfo"); + info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; + info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; return 0; } @@ -2282,8 +2284,6 @@ static struct module *setup_load_info(struct load_info *info) return ERR_PTR(-ENOEXEC); } - info->index.vers = find_sec(info->hdr, info->sechdrs, info->secstrings, "__versions"); - info->index.info = find_sec(info->hdr, info->sechdrs, info->secstrings, ".modinfo"); info->index.pcpu = find_pcpusec(info->hdr, info->sechdrs, info->secstrings); /* Check module struct version now, before we try to use module. */ -- cgit v1.1 From eded41c1c6466081e0eb00d38719c6e6ee81a5d4 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:07 -0600 Subject: module: kallsyms functions take struct load_info Simple refactor causes us to lift struct definition to top of file. Signed-off-by: Rusty Russell --- kernel/module.c | 68 ++++++++++++++++++++++++--------------------------------- 1 file changed, 29 insertions(+), 39 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index ab07f67..79c4d6f 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -110,6 +110,16 @@ int unregister_module_notifier(struct notifier_block * nb) } EXPORT_SYMBOL(unregister_module_notifier); +struct load_info { + Elf_Ehdr *hdr; + unsigned long len; + Elf_Shdr *sechdrs; + char *secstrings, *args, *strtab; + struct { + unsigned int sym, str, mod, vers, info, pcpu; + } index; +}; + /* We require a truly strong try_module_get(): 0 means failure due to ongoing or failed initialization etc. 
*/ static inline int strong_try_module_get(struct module *mod) @@ -1909,11 +1919,10 @@ static int is_exported(const char *name, unsigned long value, } /* As per nm */ -static char elf_type(const Elf_Sym *sym, - Elf_Shdr *sechdrs, - const char *secstrings, - struct module *mod) +static char elf_type(const Elf_Sym *sym, const struct load_info *info) { + const Elf_Shdr *sechdrs = info->sechdrs; + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) { if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT) return 'v'; @@ -1943,8 +1952,10 @@ static char elf_type(const Elf_Sym *sym, else return 'b'; } - if (strstarts(secstrings + sechdrs[sym->st_shndx].sh_name, ".debug")) + if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name, + ".debug")) { return 'n'; + } return '?'; } @@ -2021,35 +2032,30 @@ static unsigned long layout_symtab(struct module *mod, return symoffs; } -static void add_kallsyms(struct module *mod, - Elf_Shdr *sechdrs, - unsigned int shnum, - unsigned int symindex, - unsigned int strindex, +static void add_kallsyms(struct module *mod, struct load_info *info, unsigned long symoffs, unsigned long stroffs, - const char *secstrings, unsigned long *strmap) { unsigned int i, ndst; const Elf_Sym *src; Elf_Sym *dst; char *s; + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; - mod->symtab = (void *)sechdrs[symindex].sh_addr; - mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym); - mod->strtab = (void *)sechdrs[strindex].sh_addr; + mod->symtab = (void *)symsec->sh_addr; + mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + mod->strtab = info->strtab; /* Set types up while we still have access to sections. */ for (i = 0; i < mod->num_symtab; i++) - mod->symtab[i].st_info - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod); + mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); mod->core_symtab = dst = mod->module_core + symoffs; src = mod->symtab; *dst = *src; for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) { - if (!is_core_symbol(src, sechdrs, shnum)) + if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) continue; dst[ndst] = *src; dst[ndst].st_name = bitmap_weight(strmap, dst[ndst].st_name); @@ -2058,7 +2064,7 @@ static void add_kallsyms(struct module *mod, mod->core_num_syms = ndst; mod->core_strtab = s = mod->module_core + stroffs; - for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i) + for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i) if (test_bit(i, strmap)) *++s = mod->strtab[i]; } @@ -2075,15 +2081,10 @@ static inline unsigned long layout_symtab(struct module *mod, return 0; } -static inline void add_kallsyms(struct module *mod, - Elf_Shdr *sechdrs, - unsigned int shnum, - unsigned int symindex, - unsigned int strindex, - unsigned long symoffs, - unsigned long stroffs, - const char *secstrings, - const unsigned long *strmap) +static void add_kallsyms(struct module *mod, struct load_info *info, + unsigned long symoffs, + unsigned long stroffs, + unsigned long *strmap) { } #endif /* CONFIG_KALLSYMS */ @@ -2148,16 +2149,6 @@ static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, } #endif -struct load_info { - Elf_Ehdr *hdr; - unsigned long len; - Elf_Shdr *sechdrs; - char *secstrings, *args, *strtab; - struct { - unsigned int sym, str, mod, vers, info, pcpu; - } index; -}; - /* Sets info->hdr and info->len. 
*/ static int copy_and_check(struct load_info *info, const void __user *umod, unsigned long len) { @@ -2623,8 +2614,7 @@ static noinline struct module *load_module(void __user *umod, percpu_modcopy(mod, (void *)info.sechdrs[info.index.pcpu].sh_addr, info.sechdrs[info.index.pcpu].sh_size); - add_kallsyms(mod, info.sechdrs, info.hdr->e_shnum, info.index.sym, info.index.str, - symoffs, stroffs, info.secstrings, strmap); + add_kallsyms(mod, &info, symoffs, stroffs, strmap); kfree(strmap); strmap = NULL; -- cgit v1.1 From 511ca6ae43fbe0a7c9e0b50ad275f7ef24ef3b58 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:08 -0600 Subject: module: fix crash in get_ksymbol() when oopsing in module init Andrew had the sole pleasure of tickling this bug in linux-next; when we set up "info->strtab" it's pointing into the temporary copy of the module. For most uses that is fine, but kallsyms keeps a pointer around during module load (inside mod->strtab). If we oops for some reason inside a module's init function, kallsyms will use the mod->strtab pointer into the now-freed temporary module copy. (Later oopses work fine: after init we overwrite mod->strtab to point to a compacted core-only strtab). Reported-by: Andrew "Grumpy" Morton Signed-off-by: Rusty "Buggy" Russell Tested-by: Andrew "Happy" Morton Signed-off-by: Rusty Russell --- kernel/module.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 79c4d6f..60cdd04 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2045,7 +2045,8 @@ static void add_kallsyms(struct module *mod, struct load_info *info, mod->symtab = (void *)symsec->sh_addr; mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym); - mod->strtab = info->strtab; + /* Make sure we get permanent strtab: don't use info->strtab. */ + mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; /* Set types up while we still have access to sections. */ for (i = 0; i < mod->num_symtab; i++) -- cgit v1.1 From d913188c75191114051cf0bac75dad444c6080fa Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:08 -0600 Subject: module: layout_and_allocate layout_and_allocate() does everything up to and including the final struct module placement inside the allocated module memory. We have to store the symbol layout information in our struct load_info though. This avoids the nasty code we had before where 'mod' pointed first to the version inside the temporary allocation containing the entire file, then later was moved to point to the real struct module: now the main code only ever sees the final module address. (Includes fix for the Tony Luck-found Linus-diagnosed failure path error). 
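To make the pointer discipline concrete, here is a compressed, made-up illustration of the hazard being removed: when the object of interest lives inside a temporary buffer that is about to be copied and freed, doing the copy and the address fix-up inside one helper means callers only ever see the final location. This is a sketch in plain libc, not kernel code.

    #include <stdlib.h>
    #include <string.h>

    struct widget { int value; };

    /*
     * Sketch: copy a region from a temporary buffer into its long-lived
     * home and hand back the widget's address inside the *final* copy.
     * The pointer into the temporary buffer never escapes this helper,
     * so callers cannot keep it across the free of the temporary copy.
     */
    static struct widget *place_widget(const void *tmp, size_t total,
                                       size_t widget_off)
    {
            char *final = malloc(total);

            if (!final)
                    return NULL;
            memcpy(final, tmp, total);
            return (struct widget *)(final + widget_off);
    }

In the kernel version the same guarantee comes from layout_and_allocate() returning the struct module address inside the freshly allocated core region, so load_module() never has to touch the copy that sits inside the vmalloc'd image.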
Signed-off-by: Rusty Russell --- kernel/module.c | 224 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 125 insertions(+), 99 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 60cdd04..fb11e2a 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -115,6 +115,8 @@ struct load_info { unsigned long len; Elf_Shdr *sechdrs; char *secstrings, *args, *strtab; + unsigned long *strmap; + unsigned long symoffs, stroffs; struct { unsigned int sym, str, mod, vers, info, pcpu; } index; @@ -402,7 +404,8 @@ static int percpu_modalloc(struct module *mod, mod->percpu = __alloc_reserved_percpu(size, align); if (!mod->percpu) { printk(KERN_WARNING - "Could not allocate %lu bytes percpu data\n", size); + "%s: Could not allocate %lu bytes percpu data\n", + mod->name, size); return -ENOMEM; } mod->percpu_size = size; @@ -2032,10 +2035,7 @@ static unsigned long layout_symtab(struct module *mod, return symoffs; } -static void add_kallsyms(struct module *mod, struct load_info *info, - unsigned long symoffs, - unsigned long stroffs, - unsigned long *strmap) +static void add_kallsyms(struct module *mod, struct load_info *info) { unsigned int i, ndst; const Elf_Sym *src; @@ -2052,21 +2052,22 @@ static void add_kallsyms(struct module *mod, struct load_info *info, for (i = 0; i < mod->num_symtab; i++) mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); - mod->core_symtab = dst = mod->module_core + symoffs; + mod->core_symtab = dst = mod->module_core + info->symoffs; src = mod->symtab; *dst = *src; for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) { if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) continue; dst[ndst] = *src; - dst[ndst].st_name = bitmap_weight(strmap, dst[ndst].st_name); + dst[ndst].st_name = bitmap_weight(info->strmap, + dst[ndst].st_name); ++ndst; } mod->core_num_syms = ndst; - mod->core_strtab = s = mod->module_core + stroffs; + mod->core_strtab = s = mod->module_core + info->stroffs; for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i) - if (test_bit(i, strmap)) + if (test_bit(i, info->strmap)) *++s = mod->strtab[i]; } #else @@ -2082,10 +2083,7 @@ static inline unsigned long layout_symtab(struct module *mod, return 0; } -static void add_kallsyms(struct module *mod, struct load_info *info, - unsigned long symoffs, - unsigned long stroffs, - unsigned long *strmap) +static void add_kallsyms(struct module *mod, struct load_info *info) { } #endif /* CONFIG_KALLSYMS */ @@ -2150,8 +2148,10 @@ static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, } #endif -/* Sets info->hdr and info->len. */ -static int copy_and_check(struct load_info *info, const void __user *umod, unsigned long len) +/* Sets info->hdr, info->len and info->args. 
*/ +static int copy_and_check(struct load_info *info, + const void __user *umod, unsigned long len, + const char __user *uargs) { int err; Elf_Ehdr *hdr; @@ -2183,6 +2183,14 @@ static int copy_and_check(struct load_info *info, const void __user *umod, unsig err = -ENOEXEC; goto free_hdr; } + + /* Now copy in args */ + info->args = strndup_user(uargs, ~0UL >> 1); + if (IS_ERR(info->args)) { + err = PTR_ERR(info->args); + goto free_hdr; + } + info->hdr = hdr; info->len = len; return 0; @@ -2192,6 +2200,12 @@ free_hdr: return err; } +static void free_copy(struct load_info *info) +{ + kfree(info->args); + vfree(info->hdr); +} + static int rewrite_section_headers(struct load_info *info) { unsigned int i; @@ -2385,9 +2399,9 @@ static void find_module_sections(struct module *mod, Elf_Ehdr *hdr, mod->name); } -static struct module *move_module(struct module *mod, - Elf_Ehdr *hdr, Elf_Shdr *sechdrs, - const char *secstrings, unsigned modindex) +static int move_module(struct module *mod, + Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + const char *secstrings, unsigned modindex) { int i; void *ptr; @@ -2401,7 +2415,7 @@ static struct module *move_module(struct module *mod, */ kmemleak_not_leak(ptr); if (!ptr) - return ERR_PTR(-ENOMEM); + return -ENOMEM; memset(ptr, 0, mod->core_size); mod->module_core = ptr; @@ -2416,7 +2430,7 @@ static struct module *move_module(struct module *mod, kmemleak_ignore(ptr); if (!ptr && mod->init_size) { module_free(mod, mod->module_core); - return ERR_PTR(-ENOMEM); + return -ENOMEM; } memset(ptr, 0, mod->init_size); mod->module_init = ptr; @@ -2443,10 +2457,8 @@ static struct module *move_module(struct module *mod, DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name); } - /* Module has been moved. */ - mod = (void *)sechdrs[modindex].sh_addr; - kmemleak_load_module(mod, hdr, sechdrs, secstrings); - return mod; + + return 0; } static int check_module_license_and_versions(struct module *mod, @@ -2503,87 +2515,107 @@ static void flush_module_icache(const struct module *mod) set_fs(old_fs); } -/* Allocate and load the module: note that size of section 0 is always - zero, and we rely on this for optional sections. */ -static noinline struct module *load_module(void __user *umod, - unsigned long len, - const char __user *uargs) +static struct module *layout_and_allocate(struct load_info *info) { - struct load_info info = { NULL, }; + /* Module within temporary copy. */ struct module *mod; - long err; - unsigned long symoffs, stroffs, *strmap; - void __percpu *percpu; - struct _ddebug *debug = NULL; - unsigned int num_debug = 0; + int err; - DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", - umod, len, uargs); + mod = setup_load_info(info); + if (IS_ERR(mod)) + return mod; - err = copy_and_check(&info, umod, len); + err = check_modinfo(mod, info->sechdrs, info->index.info, info->index.vers); if (err) return ERR_PTR(err); - mod = setup_load_info(&info); - if (IS_ERR(mod)) { - err = PTR_ERR(mod); - goto free_hdr; - } - - err = check_modinfo(mod, info.sechdrs, info.index.info, info.index.vers); - if (err) - goto free_hdr; - - /* Now copy in args */ - info.args = strndup_user(uargs, ~0UL >> 1); - if (IS_ERR(info.args)) { - err = PTR_ERR(info.args); - goto free_hdr; - } - - strmap = kzalloc(BITS_TO_LONGS(info.sechdrs[info.index.str].sh_size) - * sizeof(long), GFP_KERNEL); - if (!strmap) { - err = -ENOMEM; - goto free_mod; - } - - mod->state = MODULE_STATE_COMING; - /* Allow arches to frob section contents and sizes. 
*/ - err = module_frob_arch_sections(info.hdr, info.sechdrs, info.secstrings, mod); + err = module_frob_arch_sections(info->hdr, info->sechdrs, info->secstrings, mod); if (err < 0) - goto free_mod; + goto free_args; - if (info.index.pcpu) { + if (info->index.pcpu) { /* We have a special allocation for this section. */ - err = percpu_modalloc(mod, info.sechdrs[info.index.pcpu].sh_size, - info.sechdrs[info.index.pcpu].sh_addralign); + err = percpu_modalloc(mod, info->sechdrs[info->index.pcpu].sh_size, + info->sechdrs[info->index.pcpu].sh_addralign); if (err) - goto free_mod; - info.sechdrs[info.index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; + goto free_args; + info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; } - /* Keep this around for failure path. */ - percpu = mod_percpu(mod); /* Determine total sizes, and put offsets in sh_entsize. For now this is done generically; there doesn't appear to be any special cases for the architectures. */ - layout_sections(mod, info.hdr, info.sechdrs, info.secstrings); - symoffs = layout_symtab(mod, info.sechdrs, info.index.sym, info.index.str, info.hdr, - info.secstrings, &stroffs, strmap); + layout_sections(mod, info->hdr, info->sechdrs, info->secstrings); + + info->strmap = kzalloc(BITS_TO_LONGS(info->sechdrs[info->index.str].sh_size) + * sizeof(long), GFP_KERNEL); + if (!info->strmap) { + err = -ENOMEM; + goto free_percpu; + } + info->symoffs = layout_symtab(mod, info->sechdrs, info->index.sym, info->index.str, info->hdr, + info->secstrings, &info->stroffs, info->strmap); /* Allocate and move to the final place */ - mod = move_module(mod, info.hdr, info.sechdrs, info.secstrings, info.index.mod); + err = move_module(mod, info->hdr, info->sechdrs, info->secstrings, info->index.mod); + if (err) + goto free_strmap; + + /* Module has been copied to its final place now: return it. */ + mod = (void *)info->sechdrs[info->index.mod].sh_addr; + kmemleak_load_module(mod, info->hdr, info->sechdrs, info->secstrings); + return mod; + +free_strmap: + kfree(info->strmap); +free_percpu: + percpu_modfree(mod); +free_args: + kfree(info->args); + return ERR_PTR(err); +} + +/* mod is no longer valid after this! */ +static void module_deallocate(struct module *mod, struct load_info *info) +{ + kfree(info->strmap); + percpu_modfree(mod); + module_free(mod, mod->module_init); + module_free(mod, mod->module_core); +} + +/* Allocate and load the module: note that size of section 0 is always + zero, and we rely on this for optional sections. */ +static noinline struct module *load_module(void __user *umod, + unsigned long len, + const char __user *uargs) +{ + struct load_info info = { NULL, }; + struct module *mod; + long err; + struct _ddebug *debug = NULL; + unsigned int num_debug = 0; + + DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", + umod, len, uargs); + + /* Copy in the blobs from userspace, check they are vaguely sane. */ + err = copy_and_check(&info, umod, len, uargs); + if (err) + return ERR_PTR(err); + + /* Figure out module layout, and allocate all the memory. */ + mod = layout_and_allocate(&info); if (IS_ERR(mod)) { err = PTR_ERR(mod); - goto free_percpu; + goto free_copy; } /* Now we've moved module, initialize linked lists, etc. */ err = module_unload_init(mod); if (err) - goto free_init; + goto free_module; /* Now we've got everything in the final locations, we can * find optional sections. 
*/ @@ -2600,11 +2632,11 @@ static noinline struct module *load_module(void __user *umod, err = simplify_symbols(info.sechdrs, info.index.sym, info.strtab, info.index.vers, info.index.pcpu, mod); if (err < 0) - goto cleanup; + goto free_modinfo; err = apply_relocations(mod, info.hdr, info.sechdrs, info.index.sym, info.index.str); if (err < 0) - goto cleanup; + goto free_modinfo; /* Set up and sort exception table */ mod->extable = section_objs(info.hdr, info.sechdrs, info.secstrings, "__ex_table", @@ -2615,9 +2647,7 @@ static noinline struct module *load_module(void __user *umod, percpu_modcopy(mod, (void *)info.sechdrs[info.index.pcpu].sh_addr, info.sechdrs[info.index.pcpu].sh_size); - add_kallsyms(mod, &info, symoffs, stroffs, strmap); - kfree(strmap); - strmap = NULL; + add_kallsyms(mod, &info); if (!mod->taints) debug = section_objs(info.hdr, info.sechdrs, info.secstrings, "__verbose", @@ -2625,12 +2655,14 @@ static noinline struct module *load_module(void __user *umod, err = module_finalize(info.hdr, info.sechdrs, mod); if (err < 0) - goto cleanup; + goto free_modinfo; flush_module_icache(mod); mod->args = info.args; + mod->state = MODULE_STATE_COMING; + /* Now sew it into the lists so we can get lockdep and oops * info during argument parsing. Noone should access us, since * strong_try_module_get() will fail. @@ -2666,8 +2698,9 @@ static noinline struct module *load_module(void __user *umod, add_sect_attrs(mod, info.hdr->e_shnum, info.secstrings, info.sechdrs); add_notes_attrs(mod, info.hdr->e_shnum, info.secstrings, info.sechdrs); - /* Get rid of temporary copy */ - vfree(info.hdr); + /* Get rid of temporary copy and strmap. */ + kfree(info.strmap); + free_copy(&info); trace_module_load(mod); @@ -2684,21 +2717,14 @@ static noinline struct module *load_module(void __user *umod, mutex_unlock(&module_mutex); synchronize_sched(); module_arch_cleanup(mod); - cleanup: + free_modinfo: free_modinfo(mod); free_unload: module_unload_free(mod); - free_init: - module_free(mod, mod->module_init); - module_free(mod, mod->module_core); - /* mod will be freed with core. Don't access it beyond this line! */ - free_percpu: - free_percpu(percpu); - free_mod: - kfree(info.args); - kfree(strmap); - free_hdr: - vfree(info.hdr); + free_module: + module_deallocate(mod, &info); + free_copy: + free_copy(&info); return ERR_PTR(err); } -- cgit v1.1 From 8f6d037815466cb25e7de8f00536eca71d94d4c3 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:09 -0600 Subject: module: sysfs cleanup We change the sysfs functions to take struct load_info, and call them all in mod_sysfs_setup(). We also clean up the #ifdefs a little. Signed-off-by: Rusty Russell --- kernel/module.c | 77 +++++++++++++++++++++++++-------------------------------- 1 file changed, 34 insertions(+), 43 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index fb11e2a..fd8d46c 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1126,8 +1126,9 @@ static const struct kernel_symbol *resolve_symbol_wait(Elf_Shdr *sechdrs, * /sys/module/foo/sections stuff * J. 
Corbet */ -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) +#ifdef CONFIG_SYSFS +#ifdef CONFIG_KALLSYMS static inline bool sect_empty(const Elf_Shdr *sect) { return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; @@ -1164,8 +1165,7 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs) kfree(sect_attrs); } -static void add_sect_attrs(struct module *mod, unsigned int nsect, - char *secstrings, Elf_Shdr *sechdrs) +static void add_sect_attrs(struct module *mod, const struct load_info *info) { unsigned int nloaded = 0, i, size[2]; struct module_sect_attrs *sect_attrs; @@ -1173,8 +1173,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, struct attribute **gattr; /* Count loaded sections and allocate structures */ - for (i = 0; i < nsect; i++) - if (!sect_empty(&sechdrs[i])) + for (i = 0; i < info->hdr->e_shnum; i++) + if (!sect_empty(&info->sechdrs[i])) nloaded++; size[0] = ALIGN(sizeof(*sect_attrs) + nloaded * sizeof(sect_attrs->attrs[0]), @@ -1191,11 +1191,12 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, sect_attrs->nsections = 0; sattr = §_attrs->attrs[0]; gattr = §_attrs->grp.attrs[0]; - for (i = 0; i < nsect; i++) { - if (sect_empty(&sechdrs[i])) + for (i = 0; i < info->hdr->e_shnum; i++) { + Elf_Shdr *sec = &info->sechdrs[i]; + if (sect_empty(sec)) continue; - sattr->address = sechdrs[i].sh_addr; - sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, + sattr->address = sec->sh_addr; + sattr->name = kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL); if (sattr->name == NULL) goto out; @@ -1263,8 +1264,7 @@ static void free_notes_attrs(struct module_notes_attrs *notes_attrs, kfree(notes_attrs); } -static void add_notes_attrs(struct module *mod, unsigned int nsect, - char *secstrings, Elf_Shdr *sechdrs) +static void add_notes_attrs(struct module *mod, const struct load_info *info) { unsigned int notes, loaded, i; struct module_notes_attrs *notes_attrs; @@ -1276,9 +1276,9 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect, /* Count notes sections and allocate structures. 
*/ notes = 0; - for (i = 0; i < nsect; i++) - if (!sect_empty(&sechdrs[i]) && - (sechdrs[i].sh_type == SHT_NOTE)) + for (i = 0; i < info->hdr->e_shnum; i++) + if (!sect_empty(&info->sechdrs[i]) && + (info->sechdrs[i].sh_type == SHT_NOTE)) ++notes; if (notes == 0) @@ -1292,15 +1292,15 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect, notes_attrs->notes = notes; nattr = ¬es_attrs->attrs[0]; - for (loaded = i = 0; i < nsect; ++i) { - if (sect_empty(&sechdrs[i])) + for (loaded = i = 0; i < info->hdr->e_shnum; ++i) { + if (sect_empty(&info->sechdrs[i])) continue; - if (sechdrs[i].sh_type == SHT_NOTE) { + if (info->sechdrs[i].sh_type == SHT_NOTE) { sysfs_bin_attr_init(nattr); nattr->attr.name = mod->sect_attrs->attrs[loaded].name; nattr->attr.mode = S_IRUGO; - nattr->size = sechdrs[i].sh_size; - nattr->private = (void *) sechdrs[i].sh_addr; + nattr->size = info->sechdrs[i].sh_size; + nattr->private = (void *) info->sechdrs[i].sh_addr; nattr->read = module_notes_read; ++nattr; } @@ -1331,8 +1331,8 @@ static void remove_notes_attrs(struct module *mod) #else -static inline void add_sect_attrs(struct module *mod, unsigned int nsect, - char *sectstrings, Elf_Shdr *sechdrs) +static inline void add_sect_attrs(struct module *mod, + const struct load_info *info) { } @@ -1340,17 +1340,16 @@ static inline void remove_sect_attrs(struct module *mod) { } -static inline void add_notes_attrs(struct module *mod, unsigned int nsect, - char *sectstrings, Elf_Shdr *sechdrs) +static inline void add_notes_attrs(struct module *mod, + const struct load_info *info) { } static inline void remove_notes_attrs(struct module *mod) { } -#endif +#endif /* CONFIG_KALLSYMS */ -#ifdef CONFIG_SYSFS static void add_usage_links(struct module *mod) { #ifdef CONFIG_MODULE_UNLOAD @@ -1455,6 +1454,7 @@ out: } static int mod_sysfs_setup(struct module *mod, + const struct load_info *info, struct kernel_param *kparam, unsigned int num_params) { @@ -1479,6 +1479,8 @@ static int mod_sysfs_setup(struct module *mod, goto out_unreg_param; add_usage_links(mod); + add_sect_attrs(mod, info); + add_notes_attrs(mod, info); kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); return 0; @@ -1495,32 +1497,26 @@ out: static void mod_sysfs_fini(struct module *mod) { + remove_notes_attrs(mod); + remove_sect_attrs(mod); kobject_put(&mod->mkobj.kobj); } -#else /* CONFIG_SYSFS */ +#else /* !CONFIG_SYSFS */ -static inline int mod_sysfs_init(struct module *mod) +static int mod_sysfs_init(struct module *mod) { return 0; } -static inline int mod_sysfs_setup(struct module *mod, +static int mod_sysfs_setup(struct module *mod, + const struct load_info *info, struct kernel_param *kparam, unsigned int num_params) { return 0; } -static inline int module_add_modinfo_attrs(struct module *mod) -{ - return 0; -} - -static inline void module_remove_modinfo_attrs(struct module *mod) -{ -} - static void mod_sysfs_fini(struct module *mod) { } @@ -1561,8 +1557,6 @@ static void free_module(struct module *mod) mutex_lock(&module_mutex); stop_machine(__unlink_module, mod, NULL); mutex_unlock(&module_mutex); - remove_notes_attrs(mod); - remove_sect_attrs(mod); mod_kobject_remove(mod); /* Remove dynamic debug info */ @@ -2691,13 +2685,10 @@ static noinline struct module *load_module(void __user *umod, if (err < 0) goto unlink; - err = mod_sysfs_setup(mod, mod->kp, mod->num_kp); + err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp); if (err < 0) goto unlink; - add_sect_attrs(mod, info.hdr->e_shnum, info.secstrings, info.sechdrs); - add_notes_attrs(mod, 
info.hdr->e_shnum, info.secstrings, info.sechdrs); - /* Get rid of temporary copy and strmap. */ kfree(info.strmap); free_copy(&info); -- cgit v1.1 From 36b0360d17dc3928cc96347a18a3a1cdbb7e506d Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:09 -0600 Subject: module: fix sysfs cleanup for !CONFIG_SYSFS Restore the stub module_remove_modinfo_attrs, remove the now-unused !CONFIG_SYSFS module_sysfs_init. Also, rename mod_kobject_remove() to mod_sysfs_teardown() as it is the logical counterpart to mod_sysfs_setup now. Reported-by: Randy Dunlap Signed-off-by: Rusty Russell --- kernel/module.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index fd8d46c..a64b26c 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1504,11 +1504,6 @@ static void mod_sysfs_fini(struct module *mod) #else /* !CONFIG_SYSFS */ -static int mod_sysfs_init(struct module *mod) -{ - return 0; -} - static int mod_sysfs_setup(struct module *mod, const struct load_info *info, struct kernel_param *kparam, @@ -1521,13 +1516,17 @@ static void mod_sysfs_fini(struct module *mod) { } +static void module_remove_modinfo_attrs(struct module *mod) +{ +} + static void del_usage_links(struct module *mod) { } #endif /* CONFIG_SYSFS */ -static void mod_kobject_remove(struct module *mod) +static void mod_sysfs_teardown(struct module *mod) { del_usage_links(mod); module_remove_modinfo_attrs(mod); @@ -1557,7 +1556,7 @@ static void free_module(struct module *mod) mutex_lock(&module_mutex); stop_machine(__unlink_module, mod, NULL); mutex_unlock(&module_mutex); - mod_kobject_remove(mod); + mod_sysfs_teardown(mod); /* Remove dynamic debug info */ ddebug_remove_module(mod->name); -- cgit v1.1 From 49668688dd5a5f46c72f965835388ed16c596055 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:10 -0600 Subject: module: pass load_info into other functions Pass the struct load_info into all the other functions in module loading. This neatens things and makes them more consistent. Signed-off-by: Rusty Russell --- kernel/module.c | 372 ++++++++++++++++++++++++-------------------------------- 1 file changed, 157 insertions(+), 215 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index a64b26c..29dd232 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -152,42 +152,38 @@ void __module_put_and_exit(struct module *mod, long code) EXPORT_SYMBOL(__module_put_and_exit); /* Find a module section: 0 means not found. */ -static unsigned int find_sec(Elf_Ehdr *hdr, - Elf_Shdr *sechdrs, - const char *secstrings, - const char *name) +static unsigned int find_sec(const struct load_info *info, const char *name) { unsigned int i; - for (i = 1; i < hdr->e_shnum; i++) + for (i = 1; i < info->hdr->e_shnum; i++) { + Elf_Shdr *shdr = &info->sechdrs[i]; /* Alloc bit cleared means "ignore it." */ - if ((sechdrs[i].sh_flags & SHF_ALLOC) - && strcmp(secstrings+sechdrs[i].sh_name, name) == 0) + if ((shdr->sh_flags & SHF_ALLOC) + && strcmp(info->secstrings + shdr->sh_name, name) == 0) return i; + } return 0; } /* Find a module section, or NULL. */ -static void *section_addr(Elf_Ehdr *hdr, Elf_Shdr *shdrs, - const char *secstrings, const char *name) +static void *section_addr(const struct load_info *info, const char *name) { /* Section 0 has sh_addr 0. 
*/ - return (void *)shdrs[find_sec(hdr, shdrs, secstrings, name)].sh_addr; + return (void *)info->sechdrs[find_sec(info, name)].sh_addr; } /* Find a module section, or NULL. Fill in number of "objects" in section. */ -static void *section_objs(Elf_Ehdr *hdr, - Elf_Shdr *sechdrs, - const char *secstrings, +static void *section_objs(const struct load_info *info, const char *name, size_t object_size, unsigned int *num) { - unsigned int sec = find_sec(hdr, sechdrs, secstrings, name); + unsigned int sec = find_sec(info, name); /* Section 0 has sh_addr 0 and sh_size 0. */ - *num = sechdrs[sec].sh_size / object_size; - return (void *)sechdrs[sec].sh_addr; + *num = info->sechdrs[sec].sh_size / object_size; + return (void *)info->sechdrs[sec].sh_addr; } /* Provided by the linker */ @@ -417,11 +413,9 @@ static void percpu_modfree(struct module *mod) free_percpu(mod->percpu); } -static unsigned int find_pcpusec(Elf_Ehdr *hdr, - Elf_Shdr *sechdrs, - const char *secstrings) +static unsigned int find_pcpusec(struct load_info *info) { - return find_sec(hdr, sechdrs, secstrings, ".data..percpu"); + return find_sec(info, ".data..percpu"); } static void percpu_modcopy(struct module *mod, @@ -481,9 +475,7 @@ static inline int percpu_modalloc(struct module *mod, static inline void percpu_modfree(struct module *mod) { } -static inline unsigned int find_pcpusec(Elf_Ehdr *hdr, - Elf_Shdr *sechdrs, - const char *secstrings) +static unsigned int find_pcpusec(struct load_info *info) { return 0; } @@ -1067,10 +1059,9 @@ static inline int same_magic(const char *amagic, const char *bmagic, #endif /* CONFIG_MODVERSIONS */ /* Resolve a symbol for this module. I.e. if we find one, record usage. */ -static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs, - unsigned int versindex, +static const struct kernel_symbol *resolve_symbol(struct module *mod, + const struct load_info *info, const char *name, - struct module *mod, char ownername[]) { struct module *owner; @@ -1084,7 +1075,8 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs, if (!sym) goto unlock; - if (!check_version(sechdrs, versindex, name, mod, crc, owner)) { + if (!check_version(info->sechdrs, info->index.vers, name, mod, crc, + owner)) { sym = ERR_PTR(-EINVAL); goto getname; } @@ -1103,21 +1095,20 @@ unlock: return sym; } -static const struct kernel_symbol *resolve_symbol_wait(Elf_Shdr *sechdrs, - unsigned int versindex, - const char *name, - struct module *mod) +static const struct kernel_symbol * +resolve_symbol_wait(struct module *mod, + const struct load_info *info, + const char *name) { const struct kernel_symbol *ksym; - char ownername[MODULE_NAME_LEN]; + char owner[MODULE_NAME_LEN]; if (wait_event_interruptible_timeout(module_wq, - !IS_ERR(ksym = resolve_symbol(sechdrs, versindex, name, - mod, ownername)) || - PTR_ERR(ksym) != -EBUSY, + !IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) + || PTR_ERR(ksym) != -EBUSY, 30 * HZ) <= 0) { printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n", - mod->name, ownername); + mod->name, owner); } return ksym; } @@ -1640,25 +1631,23 @@ static int verify_export_symbols(struct module *mod) } /* Change all symbols so that st_value encodes the pointer directly. 
*/ -static int simplify_symbols(Elf_Shdr *sechdrs, - unsigned int symindex, - const char *strtab, - unsigned int versindex, - unsigned int pcpuindex, - struct module *mod) -{ - Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; +static int simplify_symbols(struct module *mod, const struct load_info *info) +{ + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + Elf_Sym *sym = (void *)symsec->sh_addr; unsigned long secbase; - unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); + unsigned int i; int ret = 0; const struct kernel_symbol *ksym; - for (i = 1; i < n; i++) { + for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { + const char *name = info->strtab + sym[i].st_name; + switch (sym[i].st_shndx) { case SHN_COMMON: /* We compiled with -fno-common. These are not supposed to happen. */ - DEBUGP("Common symbol: %s\n", strtab + sym[i].st_name); + DEBUGP("Common symbol: %s\n", name); printk("%s: please compile with -fno-common\n", mod->name); ret = -ENOEXEC; @@ -1671,9 +1660,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs, break; case SHN_UNDEF: - ksym = resolve_symbol_wait(sechdrs, versindex, - strtab + sym[i].st_name, - mod); + ksym = resolve_symbol_wait(mod, info, name); /* Ok if resolved. */ if (ksym && !IS_ERR(ksym)) { sym[i].st_value = ksym->value; @@ -1685,17 +1672,16 @@ static int simplify_symbols(Elf_Shdr *sechdrs, break; printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n", - mod->name, strtab + sym[i].st_name, - PTR_ERR(ksym)); + mod->name, name, PTR_ERR(ksym)); ret = PTR_ERR(ksym) ?: -ENOENT; break; default: /* Divert to percpu allocation if a percpu var. */ - if (sym[i].st_shndx == pcpuindex) + if (sym[i].st_shndx == info->index.pcpu) secbase = (unsigned long)mod_percpu(mod); else - secbase = sechdrs[sym[i].st_shndx].sh_addr; + secbase = info->sechdrs[sym[i].st_shndx].sh_addr; sym[i].st_value += secbase; break; } @@ -1704,33 +1690,29 @@ static int simplify_symbols(Elf_Shdr *sechdrs, return ret; } -static int apply_relocations(struct module *mod, - Elf_Ehdr *hdr, - Elf_Shdr *sechdrs, - unsigned int symindex, - unsigned int strindex) +static int apply_relocations(struct module *mod, const struct load_info *info) { unsigned int i; int err = 0; /* Now do relocations. */ - for (i = 1; i < hdr->e_shnum; i++) { - const char *strtab = (char *)sechdrs[strindex].sh_addr; - unsigned int info = sechdrs[i].sh_info; + for (i = 1; i < info->hdr->e_shnum; i++) { + unsigned int infosec = info->sechdrs[i].sh_info; /* Not a valid relocation section? */ - if (info >= hdr->e_shnum) + if (infosec >= info->hdr->e_shnum) continue; /* Don't bother with non-allocated sections */ - if (!(sechdrs[info].sh_flags & SHF_ALLOC)) + if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) continue; - if (sechdrs[i].sh_type == SHT_REL) - err = apply_relocate(sechdrs, strtab, symindex, i, mod); - else if (sechdrs[i].sh_type == SHT_RELA) - err = apply_relocate_add(sechdrs, strtab, symindex, i, - mod); + if (info->sechdrs[i].sh_type == SHT_REL) + err = apply_relocate(info->sechdrs, info->strtab, + info->index.sym, i, mod); + else if (info->sechdrs[i].sh_type == SHT_RELA) + err = apply_relocate_add(info->sechdrs, info->strtab, + info->index.sym, i, mod); if (err < 0) break; } @@ -1761,10 +1743,7 @@ static long get_offset(struct module *mod, unsigned int *size, might -- code, read-only data, read-write data, small data. Tally sizes, and place the offsets into sh_entsize fields: high bit means it belongs in init. 
*/ -static void layout_sections(struct module *mod, - const Elf_Ehdr *hdr, - Elf_Shdr *sechdrs, - const char *secstrings) +static void layout_sections(struct module *mod, struct load_info *info) { static unsigned long const masks[][2] = { /* NOTE: all executable code must be the first section @@ -1777,21 +1756,22 @@ static void layout_sections(struct module *mod, }; unsigned int m, i; - for (i = 0; i < hdr->e_shnum; i++) - sechdrs[i].sh_entsize = ~0UL; + for (i = 0; i < info->hdr->e_shnum; i++) + info->sechdrs[i].sh_entsize = ~0UL; DEBUGP("Core section allocation order:\n"); for (m = 0; m < ARRAY_SIZE(masks); ++m) { - for (i = 0; i < hdr->e_shnum; ++i) { - Elf_Shdr *s = &sechdrs[i]; + for (i = 0; i < info->hdr->e_shnum; ++i) { + Elf_Shdr *s = &info->sechdrs[i]; + const char *sname = info->secstrings + s->sh_name; if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL - || strstarts(secstrings + s->sh_name, ".init")) + || strstarts(sname, ".init")) continue; s->sh_entsize = get_offset(mod, &mod->core_size, s, i); - DEBUGP("\t%s\n", secstrings + s->sh_name); + DEBUGP("\t%s\n", name); } if (m == 0) mod->core_text_size = mod->core_size; @@ -1799,17 +1779,18 @@ static void layout_sections(struct module *mod, DEBUGP("Init section allocation order:\n"); for (m = 0; m < ARRAY_SIZE(masks); ++m) { - for (i = 0; i < hdr->e_shnum; ++i) { - Elf_Shdr *s = &sechdrs[i]; + for (i = 0; i < info->hdr->e_shnum; ++i) { + Elf_Shdr *s = &info->sechdrs[i]; + const char *sname = info->secstrings + s->sh_name; if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL - || !strstarts(secstrings + s->sh_name, ".init")) + || !strstarts(sname, ".init")) continue; s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) | INIT_OFFSET_MASK); - DEBUGP("\t%s\n", secstrings + s->sh_name); + DEBUGP("\t%s\n", sname); } if (m == 0) mod->init_text_size = mod->init_size; @@ -1848,33 +1829,28 @@ static char *next_string(char *string, unsigned long *secsize) return string; } -static char *get_modinfo(const Elf_Shdr *sechdrs, - unsigned int info, - const char *tag) +static char *get_modinfo(struct load_info *info, const char *tag) { char *p; unsigned int taglen = strlen(tag); - unsigned long size = sechdrs[info].sh_size; + Elf_Shdr *infosec = &info->sechdrs[info->index.info]; + unsigned long size = infosec->sh_size; - for (p = (char *)sechdrs[info].sh_addr; p; p = next_string(p, &size)) { + for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) { if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') return p + taglen + 1; } return NULL; } -static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs, - unsigned int infoindex) +static void setup_modinfo(struct module *mod, struct load_info *info) { struct module_attribute *attr; int i; for (i = 0; (attr = modinfo_attrs[i]); i++) { if (attr->setup) - attr->setup(mod, - get_modinfo(sechdrs, - infoindex, - attr->attr.name)); + attr->setup(mod, get_modinfo(info, attr->attr.name)); } } @@ -1976,56 +1952,45 @@ static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, return true; } -static unsigned long layout_symtab(struct module *mod, - Elf_Shdr *sechdrs, - unsigned int symindex, - unsigned int strindex, - const Elf_Ehdr *hdr, - const char *secstrings, - unsigned long *pstroffs, - unsigned long *strmap) +static void layout_symtab(struct module *mod, struct load_info *info) { - unsigned long symoffs; - Elf_Shdr *symsect = sechdrs + symindex; - Elf_Shdr *strsect = sechdrs 
+ strindex; + Elf_Shdr *symsect = info->sechdrs + info->index.sym; + Elf_Shdr *strsect = info->sechdrs + info->index.str; const Elf_Sym *src; - const char *strtab; unsigned int i, nsrc, ndst; /* Put symbol section at end of init part of module. */ symsect->sh_flags |= SHF_ALLOC; symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, - symindex) | INIT_OFFSET_MASK; - DEBUGP("\t%s\n", secstrings + symsect->sh_name); + info->index.sym) | INIT_OFFSET_MASK; + DEBUGP("\t%s\n", info->secstrings + symsect->sh_name); - src = (void *)hdr + symsect->sh_offset; + src = (void *)info->hdr + symsect->sh_offset; nsrc = symsect->sh_size / sizeof(*src); - strtab = (void *)hdr + strsect->sh_offset; for (ndst = i = 1; i < nsrc; ++i, ++src) - if (is_core_symbol(src, sechdrs, hdr->e_shnum)) { + if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) { unsigned int j = src->st_name; - while(!__test_and_set_bit(j, strmap) && strtab[j]) + while (!__test_and_set_bit(j, info->strmap) + && info->strtab[j]) ++j; ++ndst; } /* Append room for core symbols at end of core part. */ - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); - mod->core_size = symoffs + ndst * sizeof(Elf_Sym); + info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); + mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); /* Put string table section at end of init part of module. */ strsect->sh_flags |= SHF_ALLOC; strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, - strindex) | INIT_OFFSET_MASK; - DEBUGP("\t%s\n", secstrings + strsect->sh_name); + info->index.str) | INIT_OFFSET_MASK; + DEBUGP("\t%s\n", info->secstrings + strsect->sh_name); /* Append room for core symbols' strings at end of core part. */ - *pstroffs = mod->core_size; - __set_bit(0, strmap); - mod->core_size += bitmap_weight(strmap, strsect->sh_size); - - return symoffs; + info->stroffs = mod->core_size; + __set_bit(0, info->strmap); + mod->core_size += bitmap_weight(info->strmap, strsect->sh_size); } static void add_kallsyms(struct module *mod, struct load_info *info) @@ -2064,16 +2029,8 @@ static void add_kallsyms(struct module *mod, struct load_info *info) *++s = mod->strtab[i]; } #else -static inline unsigned long layout_symtab(struct module *mod, - Elf_Shdr *sechdrs, - unsigned int symindex, - unsigned int strindex, - const Elf_Ehdr *hdr, - const char *secstrings, - unsigned long *pstroffs, - unsigned long *strmap) +static inline void layout_symtab(struct module *mod, struct load_info *info) { - return 0; } static void add_kallsyms(struct module *mod, struct load_info *info) @@ -2113,30 +2070,28 @@ static void *module_alloc_update_bounds(unsigned long size) } #ifdef CONFIG_DEBUG_KMEMLEAK -static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - const char *secstrings) +static void kmemleak_load_module(const struct module *mod, + const struct load_info *info) { unsigned int i; /* only scan the sections containing data */ kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); - for (i = 1; i < hdr->e_shnum; i++) { - if (!(sechdrs[i].sh_flags & SHF_ALLOC)) + for (i = 1; i < info->hdr->e_shnum; i++) { + const char *name = info->secstrings + info->sechdrs[i].sh_name; + if (!(info->sechdrs[i].sh_flags & SHF_ALLOC)) continue; - if (strncmp(secstrings + sechdrs[i].sh_name, ".data", 5) != 0 - && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0) + if (!strstarts(name, ".data") && !strstarts(name, ".bss")) continue; - kmemleak_scan_area((void *)sechdrs[i].sh_addr, - sechdrs[i].sh_size, 
GFP_KERNEL); + kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, + info->sechdrs[i].sh_size, GFP_KERNEL); } } #else -static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, - Elf_Shdr *sechdrs, - const char *secstrings) +static inline void kmemleak_load_module(const struct module *mod, + const struct load_info *info) { } #endif @@ -2227,8 +2182,8 @@ static int rewrite_section_headers(struct load_info *info) } /* Track but don't keep modinfo and version sections. */ - info->index.vers = find_sec(info->hdr, info->sechdrs, info->secstrings, "__versions"); - info->index.info = find_sec(info->hdr, info->sechdrs, info->secstrings, ".modinfo"); + info->index.vers = find_sec(info, "__versions"); + info->index.info = find_sec(info, ".modinfo"); info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; return 0; @@ -2268,8 +2223,7 @@ static struct module *setup_load_info(struct load_info *info) } } - info->index.mod = find_sec(info->hdr, info->sechdrs, info->secstrings, - ".gnu.linkonce.this_module"); + info->index.mod = find_sec(info, ".gnu.linkonce.this_module"); if (!info->index.mod) { printk(KERN_WARNING "No module found in object\n"); return ERR_PTR(-ENOEXEC); @@ -2283,7 +2237,7 @@ static struct module *setup_load_info(struct load_info *info) return ERR_PTR(-ENOEXEC); } - info->index.pcpu = find_pcpusec(info->hdr, info->sechdrs, info->secstrings); + info->index.pcpu = find_pcpusec(info); /* Check module struct version now, before we try to use module. */ if (!check_modstruct_version(info->sechdrs, info->index.vers, mod)) @@ -2292,11 +2246,9 @@ static struct module *setup_load_info(struct load_info *info) return mod; } -static int check_modinfo(struct module *mod, - const Elf_Shdr *sechdrs, - unsigned int infoindex, unsigned int versindex) +static int check_modinfo(struct module *mod, struct load_info *info) { - const char *modmagic = get_modinfo(sechdrs, infoindex, "vermagic"); + const char *modmagic = get_modinfo(info, "vermagic"); int err; /* This is allowed: modprobe --force will invalidate it. 
*/ @@ -2304,13 +2256,13 @@ static int check_modinfo(struct module *mod, err = try_to_force_load(mod, "bad vermagic"); if (err) return err; - } else if (!same_magic(modmagic, vermagic, versindex)) { + } else if (!same_magic(modmagic, vermagic, info->index.vers)) { printk(KERN_ERR "%s: version magic '%s' should be '%s'\n", mod->name, modmagic, vermagic); return -ENOEXEC; } - if (get_modinfo(sechdrs, infoindex, "staging")) { + if (get_modinfo(info, "staging")) { add_taint_module(mod, TAINT_CRAP); printk(KERN_WARNING "%s: module is from the staging directory," " the quality is unknown, you have been warned.\n", @@ -2318,58 +2270,51 @@ static int check_modinfo(struct module *mod, } /* Set up license info based on the info section */ - set_license(mod, get_modinfo(sechdrs, infoindex, "license")); + set_license(mod, get_modinfo(info, "license")); return 0; } -static void find_module_sections(struct module *mod, Elf_Ehdr *hdr, - Elf_Shdr *sechdrs, const char *secstrings) +static void find_module_sections(struct module *mod, + const struct load_info *info) { - mod->kp = section_objs(hdr, sechdrs, secstrings, "__param", + mod->kp = section_objs(info, "__param", sizeof(*mod->kp), &mod->num_kp); - mod->syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab", + mod->syms = section_objs(info, "__ksymtab", sizeof(*mod->syms), &mod->num_syms); - mod->crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab"); - mod->gpl_syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab_gpl", + mod->crcs = section_addr(info, "__kcrctab"); + mod->gpl_syms = section_objs(info, "__ksymtab_gpl", sizeof(*mod->gpl_syms), &mod->num_gpl_syms); - mod->gpl_crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab_gpl"); - mod->gpl_future_syms = section_objs(hdr, sechdrs, secstrings, + mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); + mod->gpl_future_syms = section_objs(info, "__ksymtab_gpl_future", sizeof(*mod->gpl_future_syms), &mod->num_gpl_future_syms); - mod->gpl_future_crcs = section_addr(hdr, sechdrs, secstrings, - "__kcrctab_gpl_future"); + mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future"); #ifdef CONFIG_UNUSED_SYMBOLS - mod->unused_syms = section_objs(hdr, sechdrs, secstrings, - "__ksymtab_unused", + mod->unused_syms = section_objs(info, "__ksymtab_unused", sizeof(*mod->unused_syms), &mod->num_unused_syms); - mod->unused_crcs = section_addr(hdr, sechdrs, secstrings, - "__kcrctab_unused"); - mod->unused_gpl_syms = section_objs(hdr, sechdrs, secstrings, - "__ksymtab_unused_gpl", + mod->unused_crcs = section_addr(info, "__kcrctab_unused"); + mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl", sizeof(*mod->unused_gpl_syms), &mod->num_unused_gpl_syms); - mod->unused_gpl_crcs = section_addr(hdr, sechdrs, secstrings, - "__kcrctab_unused_gpl"); + mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl"); #endif #ifdef CONFIG_CONSTRUCTORS - mod->ctors = section_objs(hdr, sechdrs, secstrings, ".ctors", + mod->ctors = section_objs(info, ".ctors", sizeof(*mod->ctors), &mod->num_ctors); #endif #ifdef CONFIG_TRACEPOINTS - mod->tracepoints = section_objs(hdr, sechdrs, secstrings, - "__tracepoints", + mod->tracepoints = section_objs(info, "__tracepoints", sizeof(*mod->tracepoints), &mod->num_tracepoints); #endif #ifdef CONFIG_EVENT_TRACING - mod->trace_events = section_objs(hdr, sechdrs, secstrings, - "_ftrace_events", + mod->trace_events = section_objs(info, "_ftrace_events", sizeof(*mod->trace_events), &mod->num_trace_events); /* @@ -2381,20 +2326,17 @@ static void 
find_module_sections(struct module *mod, Elf_Ehdr *hdr, #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD /* sechdrs[0].sh_size is always zero */ - mod->ftrace_callsites = section_objs(hdr, sechdrs, secstrings, - "__mcount_loc", + mod->ftrace_callsites = section_objs(info, "__mcount_loc", sizeof(*mod->ftrace_callsites), &mod->num_ftrace_callsites); #endif - if (section_addr(hdr, sechdrs, secstrings, "__obsparm")) + if (section_addr(info, "__obsparm")) printk(KERN_WARNING "%s: Ignoring obsolete parameters\n", mod->name); } -static int move_module(struct module *mod, - Elf_Ehdr *hdr, Elf_Shdr *sechdrs, - const char *secstrings, unsigned modindex) +static int move_module(struct module *mod, struct load_info *info) { int i; void *ptr; @@ -2430,32 +2372,31 @@ static int move_module(struct module *mod, /* Transfer each section which specifies SHF_ALLOC */ DEBUGP("final section addresses:\n"); - for (i = 0; i < hdr->e_shnum; i++) { + for (i = 0; i < info->hdr->e_shnum; i++) { void *dest; + Elf_Shdr *shdr = &info->sechdrs[i]; - if (!(sechdrs[i].sh_flags & SHF_ALLOC)) + if (!(shdr->sh_flags & SHF_ALLOC)) continue; - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) + if (shdr->sh_entsize & INIT_OFFSET_MASK) dest = mod->module_init - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); + + (shdr->sh_entsize & ~INIT_OFFSET_MASK); else - dest = mod->module_core + sechdrs[i].sh_entsize; + dest = mod->module_core + shdr->sh_entsize; - if (sechdrs[i].sh_type != SHT_NOBITS) - memcpy(dest, (void *)sechdrs[i].sh_addr, - sechdrs[i].sh_size); + if (shdr->sh_type != SHT_NOBITS) + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); /* Update sh_addr to point to copy in image. */ - sechdrs[i].sh_addr = (unsigned long)dest; + shdr->sh_addr = (unsigned long)dest; DEBUGP("\t0x%lx %s\n", - sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name); + shdr->sh_addr, info->secstrings + shdr->sh_name); } return 0; } -static int check_module_license_and_versions(struct module *mod, - Elf_Shdr *sechdrs) +static int check_module_license_and_versions(struct module *mod) { /* * ndiswrapper is under GPL by itself, but loads proprietary modules. @@ -2512,34 +2453,37 @@ static struct module *layout_and_allocate(struct load_info *info) { /* Module within temporary copy. */ struct module *mod; + Elf_Shdr *pcpusec; int err; mod = setup_load_info(info); if (IS_ERR(mod)) return mod; - err = check_modinfo(mod, info->sechdrs, info->index.info, info->index.vers); + err = check_modinfo(mod, info); if (err) return ERR_PTR(err); /* Allow arches to frob section contents and sizes. */ - err = module_frob_arch_sections(info->hdr, info->sechdrs, info->secstrings, mod); + err = module_frob_arch_sections(info->hdr, info->sechdrs, + info->secstrings, mod); if (err < 0) goto free_args; - if (info->index.pcpu) { + pcpusec = &info->sechdrs[info->index.pcpu]; + if (pcpusec->sh_size) { /* We have a special allocation for this section. */ - err = percpu_modalloc(mod, info->sechdrs[info->index.pcpu].sh_size, - info->sechdrs[info->index.pcpu].sh_addralign); + err = percpu_modalloc(mod, + pcpusec->sh_size, pcpusec->sh_addralign); if (err) goto free_args; - info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; + pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC; } /* Determine total sizes, and put offsets in sh_entsize. For now this is done generically; there doesn't appear to be any special cases for the architectures. 
*/ - layout_sections(mod, info->hdr, info->sechdrs, info->secstrings); + layout_sections(mod, info); info->strmap = kzalloc(BITS_TO_LONGS(info->sechdrs[info->index.str].sh_size) * sizeof(long), GFP_KERNEL); @@ -2547,17 +2491,16 @@ static struct module *layout_and_allocate(struct load_info *info) err = -ENOMEM; goto free_percpu; } - info->symoffs = layout_symtab(mod, info->sechdrs, info->index.sym, info->index.str, info->hdr, - info->secstrings, &info->stroffs, info->strmap); + layout_symtab(mod, info); /* Allocate and move to the final place */ - err = move_module(mod, info->hdr, info->sechdrs, info->secstrings, info->index.mod); + err = move_module(mod, info); if (err) goto free_strmap; /* Module has been copied to its final place now: return it. */ mod = (void *)info->sechdrs[info->index.mod].sh_addr; - kmemleak_load_module(mod, info->hdr, info->sechdrs, info->secstrings); + kmemleak_load_module(mod, info); return mod; free_strmap: @@ -2605,34 +2548,33 @@ static noinline struct module *load_module(void __user *umod, goto free_copy; } - /* Now we've moved module, initialize linked lists, etc. */ + /* Now module is in final location, initialize linked lists, etc. */ err = module_unload_init(mod); if (err) goto free_module; /* Now we've got everything in the final locations, we can * find optional sections. */ - find_module_sections(mod, info.hdr, info.sechdrs, info.secstrings); + find_module_sections(mod, &info); - err = check_module_license_and_versions(mod, info.sechdrs); + err = check_module_license_and_versions(mod); if (err) goto free_unload; /* Set up MODINFO_ATTR fields */ - setup_modinfo(mod, info.sechdrs, info.index.info); + setup_modinfo(mod, &info); /* Fix up syms, so that st_value is a pointer to location. */ - err = simplify_symbols(info.sechdrs, info.index.sym, info.strtab, info.index.vers, info.index.pcpu, - mod); + err = simplify_symbols(mod, &info); if (err < 0) goto free_modinfo; - err = apply_relocations(mod, info.hdr, info.sechdrs, info.index.sym, info.index.str); + err = apply_relocations(mod, &info); if (err < 0) goto free_modinfo; /* Set up and sort exception table */ - mod->extable = section_objs(info.hdr, info.sechdrs, info.secstrings, "__ex_table", + mod->extable = section_objs(&info, "__ex_table", sizeof(*mod->extable), &mod->num_exentries); sort_extable(mod->extable, mod->extable + mod->num_exentries); @@ -2643,7 +2585,7 @@ static noinline struct module *load_module(void __user *umod, add_kallsyms(mod, &info); if (!mod->taints) - debug = section_objs(info.hdr, info.sechdrs, info.secstrings, "__verbose", + debug = section_objs(&info, "__verbose", sizeof(*debug), &num_debug); err = module_finalize(info.hdr, info.sechdrs, mod); -- cgit v1.1 From 6526c534b2677ca601b7b92851437feb041d02a1 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:10 -0600 Subject: module: move module args strndup_user to just before use Instead of copying and allocating the args and storing it in load_info, we can just allocate them right before we need them. 
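In other words, the change boils down to the pattern below, condensed from the load_module() hunk in the diff that follows (free_arch_cleanup is the error label this patch introduces):

	/* Duplicate the user-supplied argument string only when it is about
	 * to be used, so none of the earlier error paths has to free it. */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}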
Signed-off-by: Rusty Russell --- kernel/module.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 29dd232..cabafe2 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -114,7 +114,7 @@ struct load_info { Elf_Ehdr *hdr; unsigned long len; Elf_Shdr *sechdrs; - char *secstrings, *args, *strtab; + char *secstrings, *strtab; unsigned long *strmap; unsigned long symoffs, stroffs; struct { @@ -2096,7 +2096,7 @@ static inline void kmemleak_load_module(const struct module *mod, } #endif -/* Sets info->hdr, info->len and info->args. */ +/* Sets info->hdr and info->len. */ static int copy_and_check(struct load_info *info, const void __user *umod, unsigned long len, const char __user *uargs) @@ -2132,13 +2132,6 @@ static int copy_and_check(struct load_info *info, goto free_hdr; } - /* Now copy in args */ - info->args = strndup_user(uargs, ~0UL >> 1); - if (IS_ERR(info->args)) { - err = PTR_ERR(info->args); - goto free_hdr; - } - info->hdr = hdr; info->len = len; return 0; @@ -2150,7 +2143,6 @@ free_hdr: static void free_copy(struct load_info *info) { - kfree(info->args); vfree(info->hdr); } @@ -2468,7 +2460,7 @@ static struct module *layout_and_allocate(struct load_info *info) err = module_frob_arch_sections(info->hdr, info->sechdrs, info->secstrings, mod); if (err < 0) - goto free_args; + goto out; pcpusec = &info->sechdrs[info->index.pcpu]; if (pcpusec->sh_size) { @@ -2476,7 +2468,7 @@ static struct module *layout_and_allocate(struct load_info *info) err = percpu_modalloc(mod, pcpusec->sh_size, pcpusec->sh_addralign); if (err) - goto free_args; + goto out; pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC; } @@ -2507,8 +2499,7 @@ free_strmap: kfree(info->strmap); free_percpu: percpu_modfree(mod); -free_args: - kfree(info->args); +out: return ERR_PTR(err); } @@ -2594,7 +2585,12 @@ static noinline struct module *load_module(void __user *umod, flush_module_icache(mod); - mod->args = info.args; + /* Now copy in args */ + mod->args = strndup_user(uargs, ~0UL >> 1); + if (IS_ERR(mod->args)) { + err = PTR_ERR(mod->args); + goto free_arch_cleanup; + } mod->state = MODULE_STATE_COMING; @@ -2648,6 +2644,8 @@ static noinline struct module *load_module(void __user *umod, unlock: mutex_unlock(&module_mutex); synchronize_sched(); + kfree(mod->args); + free_arch_cleanup: module_arch_cleanup(mod); free_modinfo: free_modinfo(mod); -- cgit v1.1 From 811d66a0e1e99902d365497eec7884113a2665bd Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:12 -0600 Subject: module: group post-relocation functions into post_relocation() This simply hoists more code out of load_module; we also put the identification of the extable and dynamic debug table in with the others in find_module_sections(). We move the taint check to the actual add/remove of the dynamic debug info: this is certain (find_module_sections is too early). 
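Condensed for readability, the new helper simply groups the steps that must run once relocation has finished (this is the same code added by the diff below, not additional API):

	static int post_relocation(struct module *mod, const struct load_info *info)
	{
		sort_extable(mod->extable, mod->extable + mod->num_exentries);

		/* Copy relocated percpu area over. */
		percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
			       info->sechdrs[info->index.pcpu].sh_size);

		add_kallsyms(mod, info);

		/* Arch-specific module finalizing. */
		return module_finalize(info->hdr, info->sechdrs, mod);
	}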
Signed-off-by: Rusty Russell Cc: Yehuda Sadeh --- kernel/module.c | 56 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 32 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index cabafe2..f3cba93 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -117,6 +117,8 @@ struct load_info { char *secstrings, *strtab; unsigned long *strmap; unsigned long symoffs, stroffs; + struct _ddebug *debug; + unsigned int num_debug; struct { unsigned int sym, str, mod, vers, info, pcpu; } index; @@ -1993,7 +1995,7 @@ static void layout_symtab(struct module *mod, struct load_info *info) mod->core_size += bitmap_weight(info->strmap, strsect->sh_size); } -static void add_kallsyms(struct module *mod, struct load_info *info) +static void add_kallsyms(struct module *mod, const struct load_info *info) { unsigned int i, ndst; const Elf_Sym *src; @@ -2040,6 +2042,8 @@ static void add_kallsyms(struct module *mod, struct load_info *info) static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num) { + if (!debug) + return; #ifdef CONFIG_DYNAMIC_DEBUG if (ddebug_add_module(debug, num, debug->modname)) printk(KERN_ERR "dynamic debug error adding module: %s\n", @@ -2267,8 +2271,7 @@ static int check_modinfo(struct module *mod, struct load_info *info) return 0; } -static void find_module_sections(struct module *mod, - const struct load_info *info) +static void find_module_sections(struct module *mod, struct load_info *info) { mod->kp = section_objs(info, "__param", sizeof(*mod->kp), &mod->num_kp); @@ -2323,9 +2326,15 @@ static void find_module_sections(struct module *mod, &mod->num_ftrace_callsites); #endif + mod->extable = section_objs(info, "__ex_table", + sizeof(*mod->extable), &mod->num_exentries); + if (section_addr(info, "__obsparm")) printk(KERN_WARNING "%s: Ignoring obsolete parameters\n", mod->name); + + info->debug = section_objs(info, "__verbose", + sizeof(*info->debug), &info->num_debug); } static int move_module(struct module *mod, struct load_info *info) @@ -2512,6 +2521,20 @@ static void module_deallocate(struct module *mod, struct load_info *info) module_free(mod, mod->module_core); } +static int post_relocation(struct module *mod, const struct load_info *info) +{ + sort_extable(mod->extable, mod->extable + mod->num_exentries); + + /* Copy relocated percpu area over. */ + percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, + info->sechdrs[info->index.pcpu].sh_size); + + add_kallsyms(mod, info); + + /* Arch-specific module finalizing. */ + return module_finalize(info->hdr, info->sechdrs, mod); +} + /* Allocate and load the module: note that size of section 0 is always zero, and we rely on this for optional sections. */ static noinline struct module *load_module(void __user *umod, @@ -2521,8 +2544,6 @@ static noinline struct module *load_module(void __user *umod, struct load_info info = { NULL, }; struct module *mod; long err; - struct _ddebug *debug = NULL; - unsigned int num_debug = 0; DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", umod, len, uargs); @@ -2564,22 +2585,7 @@ static noinline struct module *load_module(void __user *umod, if (err < 0) goto free_modinfo; - /* Set up and sort exception table */ - mod->extable = section_objs(&info, "__ex_table", - sizeof(*mod->extable), &mod->num_exentries); - sort_extable(mod->extable, mod->extable + mod->num_exentries); - - /* Finally, copy percpu area over. 
*/ - percpu_modcopy(mod, (void *)info.sechdrs[info.index.pcpu].sh_addr, - info.sechdrs[info.index.pcpu].sh_size); - - add_kallsyms(mod, &info); - - if (!mod->taints) - debug = section_objs(&info, "__verbose", - sizeof(*debug), &num_debug); - - err = module_finalize(info.hdr, info.sechdrs, mod); + err = post_relocation(mod, &info); if (err < 0) goto free_modinfo; @@ -2607,8 +2613,9 @@ static noinline struct module *load_module(void __user *umod, goto unlock; } - if (debug) - dynamic_debug_setup(debug, num_debug); + /* This has to be done once we're sure module name is unique. */ + if (!mod->taints) + dynamic_debug_setup(info.debug, info.num_debug); /* Find duplicate symbols */ err = verify_export_symbols(mod); @@ -2640,7 +2647,8 @@ static noinline struct module *load_module(void __user *umod, /* Unlink carefully: kallsyms could be walking list. */ list_del_rcu(&mod->list); ddebug: - dynamic_debug_remove(debug); + if (!mod->taints) + dynamic_debug_remove(info.debug); unlock: mutex_unlock(&module_mutex); synchronize_sched(); -- cgit v1.1 From 51f3d0f474aaebbc253100fa32a49c8256812330 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 5 Aug 2010 12:59:13 -0600 Subject: module: cleanup comments, remove noinline On my (32-bit x86) machine, sys_init_module() uses 124 bytes of stack once load_module() is inlined. This effectively reverts ffb4ba76 which inlined it due to stack pressure. Signed-off-by: Rusty Russell --- kernel/module.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index f3cba93..d0b5f8d 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1,6 +1,6 @@ /* Copyright (C) 2002 Richard Henderson - Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM. + Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2523,12 +2523,14 @@ static void module_deallocate(struct module *mod, struct load_info *info) static int post_relocation(struct module *mod, const struct load_info *info) { + /* Sort exception table now relocations are done. */ sort_extable(mod->extable, mod->extable + mod->num_exentries); /* Copy relocated percpu area over. */ percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, info->sechdrs[info->index.pcpu].sh_size); + /* Setup kallsyms-specific fields. */ add_kallsyms(mod, info); /* Arch-specific module finalizing. */ @@ -2537,7 +2539,7 @@ static int post_relocation(struct module *mod, const struct load_info *info) /* Allocate and load the module: note that size of section 0 is always zero, and we rely on this for optional sections. */ -static noinline struct module *load_module(void __user *umod, +static struct module *load_module(void __user *umod, unsigned long len, const char __user *uargs) { @@ -2598,6 +2600,7 @@ static noinline struct module *load_module(void __user *umod, goto free_arch_cleanup; } + /* Mark state as coming so strong_try_module_get() ignores us. */ mod->state = MODULE_STATE_COMING; /* Now sew it into the lists so we can get lockdep and oops @@ -2625,10 +2628,12 @@ static noinline struct module *load_module(void __user *umod, list_add_rcu(&mod->list, &modules); mutex_unlock(&module_mutex); + /* Module is ready to execute: parsing args may do that. */ err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, NULL); if (err < 0) goto unlink; + /* Link in to syfs. 
*/ err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp); if (err < 0) goto unlink; @@ -2637,9 +2642,8 @@ static noinline struct module *load_module(void __user *umod, kfree(info.strmap); free_copy(&info); - trace_module_load(mod); - /* Done! */ + trace_module_load(mod); return mod; unlink: -- cgit v1.1 From 034260d6779087431a8b2f67589c68b919299e5c Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Thu, 3 Jun 2010 22:11:25 -0700 Subject: printk: fix delayed messages from CPU hotplug events When a secondary CPU is being brought up, it is not uncommon for printk() to be invoked when cpu_online(smp_processor_id()) == 0. The case that I witnessed personally was on MIPS: http://lkml.org/lkml/2010/5/30/4 If (can_use_console() == 0), printk() will spool its output to log_buf and it will be visible in "dmesg", but that output will NOT be echoed to the console until somebody calls release_console_sem() from a CPU that is online. Therefore, the boot time messages from the new CPU can get stuck in "limbo" for a long time, and might suddenly appear on the screen when a completely unrelated event (e.g. "eth0: link is down") occurs. This patch modifies the console code so that any pending messages are automatically flushed out to the console whenever a CPU hotplug operation completes successfully or aborts. The issue was seen on 2.6.34. Original patch by Kevin Cernekee with cleanups by akpm and additional fixes by Santosh Shilimkar. This patch supersedes https://patchwork.linux-mips.org/patch/1357/. Signed-off-by: Kevin Cernekee Reviewed-by: Paul Mundt Signed-off-by: Kevin Cernekee Patchwork: https://patchwork.linux-mips.org/patch/1534/ Signed-off-by: Ralf Baechle --- kernel/printk.c | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 444b770..4ab0164 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -37,6 +37,8 @@ #include #include #include +#include +#include #include @@ -985,6 +987,32 @@ void resume_console(void) } /** + * console_cpu_notify - print deferred console messages after CPU hotplug + * @self: notifier struct + * @action: CPU hotplug event + * @hcpu: unused + * + * If printk() is called from a CPU that is not online yet, the messages + * will be spooled but will not show up on the console. This function is + * called when a new CPU comes online (or fails to come up), and ensures + * that any such output gets printed. + */ +static int __cpuinit console_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + switch (action) { + case CPU_ONLINE: + case CPU_DEAD: + case CPU_DYING: + case CPU_DOWN_FAILED: + case CPU_UP_CANCELED: + acquire_console_sem(); + release_console_sem(); + } + return NOTIFY_OK; +} + +/**
* * Acquires a semaphore which guarantees that the caller has @@ -1371,7 +1399,7 @@ int unregister_console(struct console *console) } EXPORT_SYMBOL(unregister_console); -static int __init disable_boot_consoles(void) +static int __init printk_late_init(void) { struct console *con; @@ -1382,9 +1410,10 @@ static int __init disable_boot_consoles(void) unregister_console(con); } } + hotcpu_notifier(console_cpu_notify, 0); return 0; } -late_initcall(disable_boot_consoles); +late_initcall(printk_late_init); #if defined CONFIG_PRINTK -- cgit v1.1 From a9fa20a7af1f152d2d89c44c274a310ac654e3ad Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 5 Aug 2010 09:22:19 -0500 Subject: kgdb: remove custom hex_to_bin()implementation Signed-off-by: Andy Shevchenko Signed-off-by: Jason Wessel --- kernel/debug/gdbstub.c | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index e8fd686..3517fd7 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -52,17 +52,6 @@ static unsigned long gdb_regs[(NUMREGBYTES + * GDB remote protocol parser: */ -static int hex(char ch) -{ - if ((ch >= 'a') && (ch <= 'f')) - return ch - 'a' + 10; - if ((ch >= '0') && (ch <= '9')) - return ch - '0'; - if ((ch >= 'A') && (ch <= 'F')) - return ch - 'A' + 10; - return -1; -} - #ifdef CONFIG_KGDB_KDB static int gdbstub_read_wait(void) { @@ -123,8 +112,8 @@ static void get_packet(char *buffer) buffer[count] = 0; if (ch == '#') { - xmitcsum = hex(gdbstub_read_wait()) << 4; - xmitcsum += hex(gdbstub_read_wait()); + xmitcsum = hex_to_bin(gdbstub_read_wait()) << 4; + xmitcsum += hex_to_bin(gdbstub_read_wait()); if (checksum != xmitcsum) /* failed checksum */ @@ -280,8 +269,8 @@ int kgdb_hex2mem(char *buf, char *mem, int count) tmp_hex = tmp_raw - 1; while (tmp_hex >= buf) { tmp_raw--; - *tmp_raw = hex(*tmp_hex--); - *tmp_raw |= hex(*tmp_hex--) << 4; + *tmp_raw = hex_to_bin(*tmp_hex--); + *tmp_raw |= hex_to_bin(*tmp_hex--) << 4; } return probe_kernel_write(mem, tmp_raw, count); @@ -304,7 +293,7 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val) (*ptr)++; } while (**ptr) { - hex_val = hex(**ptr); + hex_val = hex_to_bin(**ptr); if (hex_val < 0) break; -- cgit v1.1 From 84a0bd5b2830722cf80ff6ad33ef98101a947e14 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 5 Aug 2010 09:22:19 -0500 Subject: gdbstub: Optimize kgdb's "thread:" response for the gdb serial protocol The gdb debugger understands how to parse short versions of the thread reference string as long as the bytes are paired in sets of two characters. The kgdb implementation was always sending 8 leading zeros which could be omitted, and further optimized in the case of non-negative thread numbers. The negative numbers are used to reference a specific cpu in the case of kgdb. An example of the previous i386 stop packet looks like: T05thread:00000000000003bb; New stop packet response: T05thread:03bb; The previous ThreadInfo response looks like: m00000000fffffffe,0000000000000001,0000000000000002,0000000000000003,0000000000000004,0000000000000005,0000000000000006,0000000000000007,000000000000000c,0000000000000088,000000000000008a,000000000000008b,000000000000008c,000000000000008d,000000000000008e,00000000000000d4,00000000000000d5,00000000000000dd New ThreadInfo response: mfffffffe,01,02,03,04,05,06,07,0c,88,8a,8b,8c,8d,8e,d4,d5,dd A few bytes saved means better response time when using kgdb over a serial line. 
Signed-off-by: Jason Wessel --- kernel/debug/gdbstub.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index 3517fd7..e117cfd 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -367,28 +367,31 @@ static void error_packet(char *pkt, int error) * remapped to negative TIDs. */ -#define BUF_THREAD_ID_SIZE 16 +#define BUF_THREAD_ID_SIZE 8 static char *pack_threadid(char *pkt, unsigned char *id) { - char *limit; + unsigned char *limit; + int lzero = 1; + + limit = id + (BUF_THREAD_ID_SIZE / 2); + while (id < limit) { + if (!lzero || *id != 0) { + pkt = pack_hex_byte(pkt, *id); + lzero = 0; + } + id++; + } - limit = pkt + BUF_THREAD_ID_SIZE; - while (pkt < limit) - pkt = pack_hex_byte(pkt, *id++); + if (lzero) + pkt = pack_hex_byte(pkt, 0); return pkt; } static void int_to_threadref(unsigned char *id, int value) { - unsigned char *scan; - int i = 4; - - scan = (unsigned char *)id; - while (i--) - *scan++ = 0; - put_unaligned_be32(value, scan); + put_unaligned_be32(value, id); } static struct task_struct *getthread(struct pt_regs *regs, int tid) @@ -601,7 +604,7 @@ static void gdb_cmd_query(struct kgdb_state *ks) { struct task_struct *g; struct task_struct *p; - unsigned char thref[8]; + unsigned char thref[BUF_THREAD_ID_SIZE]; char *ptr; int i; int cpu; @@ -621,8 +624,7 @@ static void gdb_cmd_query(struct kgdb_state *ks) for_each_online_cpu(cpu) { ks->thr_query = 0; int_to_threadref(thref, -cpu - 2); - pack_threadid(ptr, thref); - ptr += BUF_THREAD_ID_SIZE; + ptr = pack_threadid(ptr, thref); *(ptr++) = ','; i++; } @@ -631,8 +633,7 @@ static void gdb_cmd_query(struct kgdb_state *ks) do_each_thread(g, p) { if (i >= ks->thr_query && !finished) { int_to_threadref(thref, p->pid); - pack_threadid(ptr, thref); - ptr += BUF_THREAD_ID_SIZE; + ptr = pack_threadid(ptr, thref); *(ptr++) = ','; ks->thr_query++; if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0) @@ -851,7 +852,7 @@ int gdb_serial_stub(struct kgdb_state *ks) memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer)); if (kgdb_connected) { - unsigned char thref[8]; + unsigned char thref[BUF_THREAD_ID_SIZE]; char *ptr; /* Reply to host that an exception has occurred */ -- cgit v1.1 From 534af1082329392bc29f6badf815e69ae2ae0f4c Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 5 Aug 2010 09:22:20 -0500 Subject: kgdb,kdb: individual register set and and get API The kdb shell specification includes the ability to get and set architecture specific registers by name. For the time being individual register get and set will be implemented on a per architecture basis. If an architecture defines DBG_MAX_REG_NUM > 0 then kdb and the gdbstub will use the capability for individually getting and setting architecture specific registers. 
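For reference, the per-architecture side that the generic code below relies on looks roughly like the sketch that follows. The "name" and "size" fields and the dbg_get_reg()/dbg_set_reg() calling convention are inferred from how the gdbstub and kdb hunks in this series use them; the structure tag, the "offset" field and the register list itself are illustrative assumptions for a hypothetical 32-bit architecture, not code from this patch:

	/* Sketch only: with DBG_MAX_REG_NUM defined as 3 for this example. */
	struct dbg_reg_def_t {
		char *name;	/* register name as typed in kdb / indexed by gdb */
		int size;	/* register size in bytes */
		int offset;	/* assumed: offset of the register in struct pt_regs */
	};

	struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
		{ "r0", 4, offsetof(struct pt_regs, r0) },
		{ "r1", 4, offsetof(struct pt_regs, r1) },
		{ "pc", 4, offsetof(struct pt_regs, pc) },
	};

	/* Return the register name on success, NULL for an invalid index. */
	char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
	{
		if (regno < 0 || regno >= DBG_MAX_REG_NUM)
			return NULL;
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);
		return dbg_reg_def[regno].name;
	}

	int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
	{
		if (regno < 0 || regno >= DBG_MAX_REG_NUM)
			return -EINVAL;
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);
		return 0;
	}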
Signed-off-by: Jason Wessel --- kernel/debug/gdbstub.c | 26 +++++++++ kernel/debug/kdb/kdb_main.c | 132 ++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 146 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index e117cfd..006bad8 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -328,6 +328,32 @@ static int kgdb_ebin2mem(char *buf, char *mem, int count) return probe_kernel_write(mem, c, size); } +#if DBG_MAX_REG_NUM > 0 +void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + int i; + int idx = 0; + char *ptr = (char *)gdb_regs; + + for (i = 0; i < DBG_MAX_REG_NUM; i++) { + dbg_get_reg(i, ptr + idx, regs); + idx += dbg_reg_def[i].size; + } +} + +void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + int i; + int idx = 0; + char *ptr = (char *)gdb_regs; + + for (i = 0; i < DBG_MAX_REG_NUM; i++) { + dbg_set_reg(i, ptr + idx, regs); + idx += dbg_reg_def[i].size; + } +} +#endif /* DBG_MAX_REG_NUM > 0 */ + /* Write memory due to an 'M' or 'X' packet. */ static int write_mem_msg(int binary) { diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index ebe4a28..8577e45 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -312,7 +312,7 @@ int kdbgetularg(const char *arg, unsigned long *value) if (endp == arg) { /* - * Try base 16, for us folks too lazy to type the + * Also try base 16, for us folks too lazy to type the * leading 0x... */ val = simple_strtoul(arg, &endp, 16); @@ -325,6 +325,25 @@ int kdbgetularg(const char *arg, unsigned long *value) return 0; } +int kdbgetu64arg(const char *arg, u64 *value) +{ + char *endp; + u64 val; + + val = simple_strtoull(arg, &endp, 0); + + if (endp == arg) { + + val = simple_strtoull(arg, &endp, 16); + if (endp == arg) + return KDB_BADINT; + } + + *value = val; + + return 0; +} + /* * kdb_set - This function implements the 'set' command. Alter an * existing environment variable or create a new one. @@ -1770,11 +1789,65 @@ static int kdb_go(int argc, const char **argv) */ static int kdb_rd(int argc, const char **argv) { - int diag = kdb_check_regs(); - if (diag) - return diag; + int len = kdb_check_regs(); +#if DBG_MAX_REG_NUM > 0 + int i; + char *rname; + int rsize; + u64 reg64; + u32 reg32; + u16 reg16; + u8 reg8; + + if (len) + return len; + + for (i = 0; i < DBG_MAX_REG_NUM; i++) { + rsize = dbg_reg_def[i].size * 2; + if (rsize > 16) + rsize = 2; + if (len + strlen(dbg_reg_def[i].name) + 4 + rsize > 80) { + len = 0; + kdb_printf("\n"); + } + if (len) + len += kdb_printf(" "); + switch(dbg_reg_def[i].size * 8) { + case 8: + rname = dbg_get_reg(i, ®8, kdb_current_regs); + if (!rname) + break; + len += kdb_printf("%s: %02x", rname, reg8); + break; + case 16: + rname = dbg_get_reg(i, ®16, kdb_current_regs); + if (!rname) + break; + len += kdb_printf("%s: %04x", rname, reg16); + break; + case 32: + rname = dbg_get_reg(i, ®32, kdb_current_regs); + if (!rname) + break; + len += kdb_printf("%s: %08x", rname, reg32); + break; + case 64: + rname = dbg_get_reg(i, ®64, kdb_current_regs); + if (!rname) + break; + len += kdb_printf("%s: %016llx", rname, reg64); + break; + default: + len += kdb_printf("%s: ??", dbg_reg_def[i].name); + } + } + kdb_printf("\n"); +#else + if (len) + return len; kdb_dumpregs(kdb_current_regs); +#endif return 0; } @@ -1782,32 +1855,67 @@ static int kdb_rd(int argc, const char **argv) * kdb_rm - This function implements the 'rm' (register modify) command. 
* rm register-name new-contents * Remarks: - * Currently doesn't allow modification of control or - * debug registers. + * Allows register modification with the same restrictions as gdb */ static int kdb_rm(int argc, const char **argv) { +#if DBG_MAX_REG_NUM > 0 int diag; - int ind = 0; - unsigned long contents; + const char *rname; + int i; + u64 reg64; + u32 reg32; + u16 reg16; + u8 reg8; if (argc != 2) return KDB_ARGCOUNT; /* * Allow presence or absence of leading '%' symbol. */ - if (argv[1][0] == '%') - ind = 1; + rname = argv[1]; + if (*rname == '%') + rname++; - diag = kdbgetularg(argv[2], &contents); + diag = kdbgetu64arg(argv[2], ®64); if (diag) return diag; diag = kdb_check_regs(); if (diag) return diag; + + diag = KDB_BADREG; + for (i = 0; i < DBG_MAX_REG_NUM; i++) { + if (strcmp(rname, dbg_reg_def[i].name) == 0) { + diag = 0; + break; + } + } + if (!diag) { + switch(dbg_reg_def[i].size * 8) { + case 8: + reg8 = reg64; + dbg_set_reg(i, ®8, kdb_current_regs); + break; + case 16: + reg16 = reg64; + dbg_set_reg(i, ®16, kdb_current_regs); + break; + case 32: + reg32 = reg64; + dbg_set_reg(i, ®32, kdb_current_regs); + break; + case 64: + dbg_set_reg(i, ®64, kdb_current_regs); + break; + } + } + return diag; +#else kdb_printf("ERROR: Register set currently not implemented\n"); - return 0; + return 0; +#endif } #if defined(CONFIG_MAGIC_SYSRQ) -- cgit v1.1 From 55751145dc1e08e16df418cdd101661f5c6ac991 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 5 Aug 2010 09:22:21 -0500 Subject: gdbstub: Implement gdbserial 'p' and 'P' packets The gdbserial 'p' and 'P' packets allow gdb to individually get and set registers instead of querying for all the available registers. Signed-off-by: Jason Wessel --- kernel/debug/gdbstub.c | 97 +++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 77 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index 006bad8..4ef9ddd 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -225,7 +225,7 @@ void gdbstub_msg_write(const char *s, int len) * buf. Return a pointer to the last char put in buf (null). May * return an error. 
*/ -int kgdb_mem2hex(char *mem, char *buf, int count) +char *kgdb_mem2hex(char *mem, char *buf, int count) { char *tmp; int err; @@ -237,17 +237,16 @@ int kgdb_mem2hex(char *mem, char *buf, int count) tmp = buf + count; err = probe_kernel_read(tmp, mem, count); - if (!err) { - while (count > 0) { - buf = pack_hex_byte(buf, *tmp); - tmp++; - count--; - } - - *buf = 0; + if (err) + return NULL; + while (count > 0) { + buf = pack_hex_byte(buf, *tmp); + tmp++; + count--; } + *buf = 0; - return err; + return buf; } /* @@ -481,8 +480,7 @@ static void gdb_cmd_status(struct kgdb_state *ks) pack_hex_byte(&remcom_out_buffer[1], ks->signo); } -/* Handle the 'g' get registers request */ -static void gdb_cmd_getregs(struct kgdb_state *ks) +static void gdb_get_regs_helper(struct kgdb_state *ks) { struct task_struct *thread; void *local_debuggerinfo; @@ -523,6 +521,12 @@ static void gdb_cmd_getregs(struct kgdb_state *ks) */ sleeping_thread_to_gdb_regs(gdb_regs, thread); } +} + +/* Handle the 'g' get registers request */ +static void gdb_cmd_getregs(struct kgdb_state *ks) +{ + gdb_get_regs_helper(ks); kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES); } @@ -545,13 +549,13 @@ static void gdb_cmd_memread(struct kgdb_state *ks) char *ptr = &remcom_in_buffer[1]; unsigned long length; unsigned long addr; - int err; + char *err; if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' && kgdb_hex2long(&ptr, &length) > 0) { err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length); - if (err) - error_packet(remcom_out_buffer, err); + if (!err) + error_packet(remcom_out_buffer, -EINVAL); } else { error_packet(remcom_out_buffer, -EINVAL); } @@ -568,6 +572,52 @@ static void gdb_cmd_memwrite(struct kgdb_state *ks) strcpy(remcom_out_buffer, "OK"); } +#if DBG_MAX_REG_NUM > 0 +static char *gdb_hex_reg_helper(int regnum, char *out) +{ + int i; + int offset = 0; + + for (i = 0; i < regnum; i++) + offset += dbg_reg_def[i].size; + return kgdb_mem2hex((char *)gdb_regs + offset, out, + dbg_reg_def[i].size); +} + +/* Handle the 'p' individual regster get */ +static void gdb_cmd_reg_get(struct kgdb_state *ks) +{ + unsigned long regnum; + char *ptr = &remcom_in_buffer[1]; + + kgdb_hex2long(&ptr, ®num); + if (regnum >= DBG_MAX_REG_NUM) { + error_packet(remcom_out_buffer, -EINVAL); + return; + } + gdb_get_regs_helper(ks); + gdb_hex_reg_helper(regnum, remcom_out_buffer); +} + +/* Handle the 'P' individual regster set */ +static void gdb_cmd_reg_set(struct kgdb_state *ks) +{ + unsigned long regnum; + char *ptr = &remcom_in_buffer[1]; + + kgdb_hex2long(&ptr, ®num); + if (*ptr++ != '=' || + !(!kgdb_usethread || kgdb_usethread == current) || + !dbg_get_reg(regnum, gdb_regs, ks->linux_regs)) { + error_packet(remcom_out_buffer, -EINVAL); + return; + } + kgdb_hex2mem(ptr, (char *)gdb_regs, dbg_reg_def[regnum].size); + dbg_set_reg(regnum, gdb_regs, ks->linux_regs); + strcpy(remcom_out_buffer, "OK"); +} +#endif /* DBG_MAX_REG_NUM > 0 */ + /* Handle the 'X' memory binary write bytes */ static void gdb_cmd_binwrite(struct kgdb_state *ks) { @@ -874,8 +924,11 @@ int gdb_serial_stub(struct kgdb_state *ks) int error = 0; int tmp; - /* Clear the out buffer. */ + /* Initialize comm buffer and globals. 
*/ memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer)); + kgdb_usethread = kgdb_info[ks->cpu].task; + ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid); + ks->pass_exception = 0; if (kgdb_connected) { unsigned char thref[BUF_THREAD_ID_SIZE]; @@ -892,10 +945,6 @@ int gdb_serial_stub(struct kgdb_state *ks) put_packet(remcom_out_buffer); } - kgdb_usethread = kgdb_info[ks->cpu].task; - ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid); - ks->pass_exception = 0; - while (1) { error = 0; @@ -920,6 +969,14 @@ int gdb_serial_stub(struct kgdb_state *ks) case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */ gdb_cmd_memwrite(ks); break; +#if DBG_MAX_REG_NUM > 0 + case 'p': /* pXX Return gdb register XX (in hex) */ + gdb_cmd_reg_get(ks); + break; + case 'P': /* PXX=aaaa Set gdb register XX to aaaa (in hex) */ + gdb_cmd_reg_set(ks); + break; +#endif /* DBG_MAX_REG_NUM > 0 */ case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */ gdb_cmd_binwrite(ks); break; -- cgit v1.1 From 6d855b1d83c980c1283d98d2d63a2bd3a87e21b7 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 5 Aug 2010 09:22:22 -0500 Subject: gdbstub: do not directly use dbg_reg_def[] in gdb_cmd_reg_set() Presently the usable registers definitions on x86 are not contiguous for kgdb. The x86 kgdb uses a case statement for the sparse register accesses. The array which defines the registers (dbg_reg_def) should not be used directly in order to safely work with sparse register definitions. Specifically there was a problem when gdb accesses ORIG_AX, which is accessed only through the case statement. This patch encodes register memory using the size information provided from the debugger which avoids the need to look up the size of the register. The dbg_set_reg() function always further validates the inputs from the debugger. Signed-off-by: Jason Wessel Signed-off-by: Dongdong Deng --- kernel/debug/gdbstub.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index 4ef9ddd..fc7b174 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -604,6 +604,7 @@ static void gdb_cmd_reg_set(struct kgdb_state *ks) { unsigned long regnum; char *ptr = &remcom_in_buffer[1]; + int i = 0; kgdb_hex2long(&ptr, ®num); if (*ptr++ != '=' || @@ -612,7 +613,14 @@ static void gdb_cmd_reg_set(struct kgdb_state *ks) error_packet(remcom_out_buffer, -EINVAL); return; } - kgdb_hex2mem(ptr, (char *)gdb_regs, dbg_reg_def[regnum].size); + memset(gdb_regs, 0, sizeof(gdb_regs)); + while (i < sizeof(gdb_regs) * 2) + if (hex_to_bin(ptr[i]) >= 0) + i++; + else + break; + i = i / 2; + kgdb_hex2mem(ptr, (char *)gdb_regs, i); dbg_set_reg(regnum, gdb_regs, ks->linux_regs); strcpy(remcom_out_buffer, "OK"); } -- cgit v1.1 From 955b61e597984745fb7d34c75708f6503b6aaeab Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 5 Aug 2010 09:22:23 -0500 Subject: ftrace,kdb: Extend kdb to be able to dump the ftrace buffer Add in a helper function to allow the kdb shell to dump the ftrace buffer. Modify trace.c to expose the capability to iterate over the ftrace buffer in a read only capacity. 
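For example, with the helper wired up as a kdb command, a session could look roughly like this (the prompt and the trimmed entry lines are illustrative; only the command name and the banner text come from the code added below):

  [0]kdb> ftdump
  Dumping ftrace buffer:
  ---------------------------------
  ... trace entries ...
  ---------------------------------

An optional argument skips an initial number of output lines, e.g. "ftdump 25".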
Signed-off-by: Jason Wessel Acked-by: Steven Rostedt CC: Frederic Weisbecker --- kernel/trace/Makefile | 3 ++ kernel/trace/trace.c | 43 ++++++++--------- kernel/trace/trace.h | 19 ++++++++ kernel/trace/trace_kdb.c | 119 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 163 insertions(+), 21 deletions(-) create mode 100644 kernel/trace/trace_kdb.c (limited to 'kernel') diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index ffb1a5b..4215530 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -57,5 +57,8 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o obj-$(CONFIG_EVENT_TRACING) += power-traces.o +ifeq ($(CONFIG_TRACING),y) +obj-$(CONFIG_KGDB_KDB) += trace_kdb.o +endif libftrace-y := ftrace.o diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 086d363..d6736b9 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -101,10 +101,7 @@ static inline void ftrace_enable_cpu(void) preempt_enable(); } -static cpumask_var_t __read_mostly tracing_buffer_mask; - -#define for_each_tracing_cpu(cpu) \ - for_each_cpu(cpu, tracing_buffer_mask) +cpumask_var_t __read_mostly tracing_buffer_mask; /* * ftrace_dump_on_oops - variable to dump ftrace buffer on oops @@ -1539,11 +1536,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) } EXPORT_SYMBOL_GPL(trace_vprintk); -enum trace_file_type { - TRACE_FILE_LAT_FMT = 1, - TRACE_FILE_ANNOTATE = 2, -}; - static void trace_iterator_increment(struct trace_iterator *iter) { /* Don't allow ftrace to trace into the ring buffers */ @@ -1641,7 +1633,7 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, } /* Find the next real entry, and increment the iterator to the next entry */ -static void *find_next_entry_inc(struct trace_iterator *iter) +void *trace_find_next_entry_inc(struct trace_iterator *iter) { iter->ent = __find_next_entry(iter, &iter->cpu, &iter->lost_events, &iter->ts); @@ -1676,19 +1668,19 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) return NULL; if (iter->idx < 0) - ent = find_next_entry_inc(iter); + ent = trace_find_next_entry_inc(iter); else ent = iter; while (ent && iter->idx < i) - ent = find_next_entry_inc(iter); + ent = trace_find_next_entry_inc(iter); iter->pos = *pos; return ent; } -static void tracing_iter_reset(struct trace_iterator *iter, int cpu) +void tracing_iter_reset(struct trace_iterator *iter, int cpu) { struct trace_array *tr = iter->tr; struct ring_buffer_event *event; @@ -2049,7 +2041,7 @@ int trace_empty(struct trace_iterator *iter) } /* Called with trace_event_read_lock() held. 
*/ -static enum print_line_t print_trace_line(struct trace_iterator *iter) +enum print_line_t print_trace_line(struct trace_iterator *iter) { enum print_line_t ret; @@ -3211,7 +3203,7 @@ waitagain: trace_event_read_lock(); trace_access_lock(iter->cpu_file); - while (find_next_entry_inc(iter) != NULL) { + while (trace_find_next_entry_inc(iter) != NULL) { enum print_line_t ret; int len = iter->seq.len; @@ -3294,7 +3286,7 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); rem -= count; - if (!find_next_entry_inc(iter)) { + if (!trace_find_next_entry_inc(iter)) { rem = 0; iter->ent = NULL; break; @@ -3350,7 +3342,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, if (ret <= 0) goto out_err; - if (!iter->ent && !find_next_entry_inc(iter)) { + if (!iter->ent && !trace_find_next_entry_inc(iter)) { ret = -EFAULT; goto out_err; } @@ -4414,7 +4406,7 @@ static struct notifier_block trace_die_notifier = { */ #define KERN_TRACE KERN_EMERG -static void +void trace_printk_seq(struct trace_seq *s) { /* Probably should print a warning here. */ @@ -4429,6 +4421,13 @@ trace_printk_seq(struct trace_seq *s) trace_seq_init(s); } +void trace_init_global_iter(struct trace_iterator *iter) +{ + iter->tr = &global_trace; + iter->trace = current_trace; + iter->cpu_file = TRACE_PIPE_ALL_CPU; +} + static void __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) { @@ -4454,8 +4453,10 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) if (disable_tracing) ftrace_kill(); + trace_init_global_iter(&iter); + for_each_tracing_cpu(cpu) { - atomic_inc(&global_trace.data[cpu]->disabled); + atomic_inc(&iter.tr->data[cpu]->disabled); } old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; @@ -4504,7 +4505,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) iter.iter_flags |= TRACE_FILE_LAT_FMT; iter.pos = -1; - if (find_next_entry_inc(&iter) != NULL) { + if (trace_find_next_entry_inc(&iter) != NULL) { int ret; ret = print_trace_line(&iter); @@ -4526,7 +4527,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) trace_flags |= old_userobj; for_each_tracing_cpu(cpu) { - atomic_dec(&global_trace.data[cpu]->disabled); + atomic_dec(&iter.tr->data[cpu]->disabled); } tracing_on(); } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 2cd9639..0605fc0 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -338,6 +338,14 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts); +int trace_empty(struct trace_iterator *iter); + +void *trace_find_next_entry_inc(struct trace_iterator *iter); + +void trace_init_global_iter(struct trace_iterator *iter); + +void tracing_iter_reset(struct trace_iterator *iter, int cpu); + void default_wait_pipe(struct trace_iterator *iter); void poll_wait_pipe(struct trace_iterator *iter); @@ -380,6 +388,15 @@ void tracing_start_sched_switch_record(void); int register_tracer(struct tracer *type); void unregister_tracer(struct tracer *type); int is_tracing_stopped(void); +enum trace_file_type { + TRACE_FILE_LAT_FMT = 1, + TRACE_FILE_ANNOTATE = 2, +}; + +extern cpumask_var_t __read_mostly tracing_buffer_mask; + +#define for_each_tracing_cpu(cpu) \ + for_each_cpu(cpu, tracing_buffer_mask) extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); @@ -471,6 +488,8 @@ 
trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args); int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...); +void trace_printk_seq(struct trace_seq *s); +enum print_line_t print_trace_line(struct trace_iterator *iter); extern unsigned long trace_flags; diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c new file mode 100644 index 0000000..44cbda2 --- /dev/null +++ b/kernel/trace/trace_kdb.c @@ -0,0 +1,119 @@ +/* + * kdb helper for dumping the ftrace buffer + * + * Copyright (C) 2010 Jason Wessel + * + * ftrace_dump_buf based on ftrace_dump: + * Copyright (C) 2007-2008 Steven Rostedt + * Copyright (C) 2008 Ingo Molnar + * + */ +#include +#include +#include +#include + +#include "../debug/kdb/kdb_private.h" +#include "trace.h" +#include "trace_output.h" + +static void ftrace_dump_buf(int skip_lines) +{ + /* use static because iter can be a bit big for the stack */ + static struct trace_iterator iter; + unsigned int old_userobj; + int cnt = 0, cpu; + + trace_init_global_iter(&iter); + + for_each_tracing_cpu(cpu) { + atomic_inc(&iter.tr->data[cpu]->disabled); + } + + old_userobj = trace_flags; + + /* don't look at user memory in panic mode */ + trace_flags &= ~TRACE_ITER_SYM_USEROBJ; + + kdb_printf("Dumping ftrace buffer:\n"); + + /* reset all but tr, trace, and overruns */ + memset(&iter.seq, 0, + sizeof(struct trace_iterator) - + offsetof(struct trace_iterator, seq)); + iter.iter_flags |= TRACE_FILE_LAT_FMT; + iter.pos = -1; + + for_each_tracing_cpu(cpu) { + iter.buffer_iter[cpu] = + ring_buffer_read_prepare(iter.tr->buffer, cpu); + ring_buffer_read_start(iter.buffer_iter[cpu]); + tracing_iter_reset(&iter, cpu); + } + + if (!trace_empty(&iter)) + trace_find_next_entry_inc(&iter); + while (!trace_empty(&iter)) { + if (!cnt) + kdb_printf("---------------------------------\n"); + cnt++; + + if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines) + print_trace_line(&iter); + if (!skip_lines) + trace_printk_seq(&iter.seq); + else + skip_lines--; + if (KDB_FLAG(CMD_INTERRUPT)) + goto out; + } + + if (!cnt) + kdb_printf(" (ftrace buffer empty)\n"); + else + kdb_printf("---------------------------------\n"); + +out: + trace_flags = old_userobj; + + for_each_tracing_cpu(cpu) { + atomic_dec(&iter.tr->data[cpu]->disabled); + } + + for_each_tracing_cpu(cpu) + if (iter.buffer_iter[cpu]) + ring_buffer_read_finish(iter.buffer_iter[cpu]); +} + +/* + * kdb_ftdump - Dump the ftrace log buffer + */ +static int kdb_ftdump(int argc, const char **argv) +{ + int skip_lines = 0; + char *cp; + + if (argc > 1) + return KDB_ARGCOUNT; + + if (argc) { + skip_lines = simple_strtol(argv[1], &cp, 0); + if (*cp) + skip_lines = 0; + } + + kdb_trap_printk++; + ftrace_dump_buf(skip_lines); + kdb_trap_printk--; + + return 0; +} + +static __init int kdb_ftrace_register(void) +{ + kdb_register_repeat("ftdump", kdb_ftdump, "", "Dump ftrace log", + 0, KDB_REPEAT_NONE); + return 0; +} + +late_initcall(kdb_ftrace_register); -- cgit v1.1 From 19063c776fe745fab11216422cf56489ee83b452 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 5 Aug 2010 09:22:23 -0500 Subject: ftrace,kdb: Allow dumping a specific cpu's buffer with ftdump In systems with more than one processor it is desirable to look at the per cpu trace buffers. 
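With the extra argument the command can be pointed at a single cpu's buffer; the usage string registered below is "[skip_#lines] [cpu]", so for instance (prompt illustrative):

  [0]kdb> ftdump 0 1

dumps cpu 1's buffer without skipping any lines, while a bare "ftdump" keeps the old behaviour of walking every cpu's buffer.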
Signed-off-by: Jason Wessel Acked-by: Steven Rostedt CC: Frederic Weisbecker --- kernel/trace/trace_kdb.c | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index 44cbda2..7b8ecd7 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c @@ -17,7 +17,7 @@ #include "trace.h" #include "trace_output.h" -static void ftrace_dump_buf(int skip_lines) +static void ftrace_dump_buf(int skip_lines, long cpu_file) { /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; @@ -44,13 +44,20 @@ static void ftrace_dump_buf(int skip_lines) iter.iter_flags |= TRACE_FILE_LAT_FMT; iter.pos = -1; - for_each_tracing_cpu(cpu) { - iter.buffer_iter[cpu] = + if (cpu_file == TRACE_PIPE_ALL_CPU) { + for_each_tracing_cpu(cpu) { + iter.buffer_iter[cpu] = ring_buffer_read_prepare(iter.tr->buffer, cpu); - ring_buffer_read_start(iter.buffer_iter[cpu]); - tracing_iter_reset(&iter, cpu); + ring_buffer_read_start(iter.buffer_iter[cpu]); + tracing_iter_reset(&iter, cpu); + } + } else { + iter.cpu_file = cpu_file; + iter.buffer_iter[cpu_file] = + ring_buffer_read_prepare(iter.tr->buffer, cpu_file); + ring_buffer_read_start(iter.buffer_iter[cpu_file]); + tracing_iter_reset(&iter, cpu_file); } - if (!trace_empty(&iter)) trace_find_next_entry_inc(&iter); while (!trace_empty(&iter)) { @@ -91,9 +98,10 @@ out: static int kdb_ftdump(int argc, const char **argv) { int skip_lines = 0; + long cpu_file; char *cp; - if (argc > 1) + if (argc > 2) return KDB_ARGCOUNT; if (argc) { @@ -102,8 +110,17 @@ static int kdb_ftdump(int argc, const char **argv) skip_lines = 0; } + if (argc == 2) { + cpu_file = simple_strtol(argv[2], &cp, 0); + if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 || + !cpu_online(cpu_file)) + return KDB_BADINT; + } else { + cpu_file = TRACE_PIPE_ALL_CPU; + } + kdb_trap_printk++; - ftrace_dump_buf(skip_lines); + ftrace_dump_buf(skip_lines, cpu_file); kdb_trap_printk--; return 0; @@ -111,8 +128,8 @@ static int kdb_ftdump(int argc, const char **argv) static __init int kdb_ftrace_register(void) { - kdb_register_repeat("ftdump", kdb_ftdump, "", "Dump ftrace log", - 0, KDB_REPEAT_NONE); + kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", + "Dump ftrace log", 0, KDB_REPEAT_NONE); return 0; } -- cgit v1.1 From 3fa43aba08c5b5a4b407e402606fbe463239b14a Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 5 Aug 2010 09:22:25 -0500 Subject: debug_core,kdb: fix crash when arch does not have single step When an arch such as mips and microblaze does not implement either HW or software single stepping the debug core should re-enter kdb. The kdb code will properly ignore the single step operation. Attempting to single step the kernel without software or hardware support causes unpredictable kernel crashes. 
Signed-off-by: Jason Wessel --- kernel/debug/debug_core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 8bc5eef..9ed9307 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -605,6 +605,8 @@ cpu_master_loop: if (dbg_kdb_mode) { kgdb_connected = 1; error = kdb_stub(ks); + if (error == -1) + continue; kgdb_connected = 0; } else { error = gdb_serial_stub(ks); -- cgit v1.1 From 81d4450732c68aa728f2c86c0c2993c6cfc3d032 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 5 Aug 2010 09:22:30 -0500 Subject: vt,console,kdb: automatically set kdb LINES variable The kernel console interface stores the number of lines it is configured to use. The kdb debugger can greatly benefit by knowing how many lines there are on the console for the pager functionality without having the end user compile in the setting or have to repeatedly change it at run time. Signed-off-by: Jason Wessel Signed-off-by: Jesse Barnes CC: David Airlie CC: Andrew Morton --- kernel/debug/kdb/kdb_private.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 97d3ba6..c438f54 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -144,9 +144,7 @@ extern int kdb_getword(unsigned long *, unsigned long, size_t); extern int kdb_putword(unsigned long, unsigned long, size_t); extern int kdbgetularg(const char *, unsigned long *); -extern int kdb_set(int, const char **); extern char *kdbgetenv(const char *); -extern int kdbgetintenv(const char *, int *); extern int kdbgetaddrarg(int, const char **, int*, unsigned long *, long *, char **); extern int kdbgetsymval(const char *, kdb_symtab_t *); -- cgit v1.1 From 94f17cd7887ca681ea88fda1fd9bf65c0ca161f0 Mon Sep 17 00:00:00 2001 From: Ian Abbott Date: Mon, 7 Jun 2010 12:57:12 +0100 Subject: hotplug: Support kernel/hotplug sysctl variable when !CONFIG_NET The kernel/hotplug sysctl variable (/proc/sys/kernel/hotplug file) was made conditional on CONFIG_NET by commit f743ca5e10f4145e0b3e6d11b9b46171e16af7ce (applied in 2.6.18) to fix problems with undefined references in 2.6.16 when CONFIG_HOTPLUG=y && !CONFIG_NET, but this restriction is no longer needed. This patch makes the kernel/hotplug sysctl variable depend only on CONFIG_HOTPLUG. Signed-off-by: Ian Abbott Acked-by: Randy Dunlap Signed-off-by: Greg Kroah-Hartman --- kernel/sysctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index d24f761f..f73da1c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -562,7 +562,7 @@ static struct ctl_table kern_table[] = { .extra2 = &one, }, #endif -#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET) +#ifdef CONFIG_HOTPLUG { .procname = "hotplug", .data = &uevent_helper, -- cgit v1.1 From 676db4af043014e852f67ba0349dae0071bd11f3 Mon Sep 17 00:00:00 2001 From: Greg KH Date: Thu, 5 Aug 2010 13:53:35 -0700 Subject: cgroupfs: create /sys/fs/cgroup to mount cgroupfs on We really shouldn't be asking userspace to create new root filesystems. So follow along with all of the other in-kernel filesystems, and provide a mount point in sysfs. For cgroupfs, this should be in /sys/fs/cgroup/ This change provides that mount point when the cgroup filesystem is registered in the kernel. 
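Once the kobject exists, a hierarchy can be mounted there in the usual way; the controller chosen here is only an example:

  # mount -t cgroup -o cpuset cgroup /sys/fs/cgroup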
Acked-by: Paul Menage Acked-by: Dhaval Giani Cc: Li Zefan Cc: Lennart Poettering Cc: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- kernel/cgroup.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index a8ce099..d83cab0 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1623,6 +1623,8 @@ static struct file_system_type cgroup_fs_type = { .kill_sb = cgroup_kill_sb, }; +static struct kobject *cgroup_kobj; + static inline struct cgroup *__d_cgrp(struct dentry *dentry) { return dentry->d_fsdata; @@ -3894,9 +3896,18 @@ int __init cgroup_init(void) hhead = css_set_hash(init_css_set.subsys); hlist_add_head(&init_css_set.hlist, hhead); BUG_ON(!init_root_id(&rootnode)); + + cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); + if (!cgroup_kobj) { + err = -ENOMEM; + goto out; + } + err = register_filesystem(&cgroup_fs_type); - if (err < 0) + if (err < 0) { + kobject_put(cgroup_kobj); goto out; + } proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations); -- cgit v1.1 From 575570f02761bd680ba5731c1dfd4701062e7fb2 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Tue, 27 Jul 2010 16:06:34 +0800 Subject: tracing: Fix an unallocated memory access in function_graph With CONFIG_DEBUG_PAGEALLOC, I observed an unallocated memory access in function_graph trace. It appears we find a small size entry in ring buffer, but we access it as a big size entry. The access overflows the page size and touches an unallocated page. Cc: Signed-off-by: Shaohua Li LKML-Reference: <1280217994.32400.76.camel@sli10-desk.sh.intel.com> [ Added a comment to explain the problem - SDR ] Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions_graph.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 79f4bac..b4c179a 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter, * if the output fails. */ data->ent = *curr; - data->ret = *next; + /* + * If the next event is not a return type, then + * we only care about what type it is. Otherwise we can + * safely copy the entire event. + */ + if (next->ent.type == TRACE_GRAPH_RET) + data->ret = *next; + else + data->ret.ent.type = next->ent.type; } } -- cgit v1.1 From 18fab912d4fa70133df164d2dcf3310be0c38c34 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Wed, 28 Jul 2010 14:14:01 +0800 Subject: tracing: Fix ring_buffer_read_page reading out of page boundary With the configuration: CONFIG_DEBUG_PAGEALLOC=y and Shaohua's patch: [PATCH]x86: make spurious_fault check correct pte bit Function call graph trace with the following will trigger a page fault. 
# cd /sys/kernel/debug/tracing/ # echo function_graph > current_tracer # cat per_cpu/cpu1/trace_pipe_raw > /dev/null BUG: unable to handle kernel paging request at ffff880006e99000 IP: [] rb_event_length+0x1/0x3f PGD 1b19063 PUD 1b1d063 PMD 3f067 PTE 6e99160 Oops: 0000 [#1] SMP DEBUG_PAGEALLOC last sysfs file: /sys/devices/virtual/net/lo/operstate CPU 1 Modules linked in: Pid: 1982, comm: cat Not tainted 2.6.35-rc6-aes+ #300 /Bochs RIP: 0010:[] [] rb_event_length+0x1/0x3f RSP: 0018:ffff880006475e38 EFLAGS: 00010006 RAX: 0000000000000ff0 RBX: ffff88000786c630 RCX: 000000000000001d RDX: ffff880006e98000 RSI: 0000000000000ff0 RDI: ffff880006e99000 RBP: ffff880006475eb8 R08: 000000145d7008bd R09: 0000000000000000 R10: 0000000000008000 R11: ffffffff815d9336 R12: ffff880006d08000 R13: ffff880006e605d8 R14: 0000000000000000 R15: 0000000000000018 FS: 00007f2b83e456f0(0000) GS:ffff880002100000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: ffff880006e99000 CR3: 00000000064a8000 CR4: 00000000000006e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process cat (pid: 1982, threadinfo ffff880006474000, task ffff880006e40770) Stack: ffff880006475eb8 ffffffff8108730f 0000000000000ff0 000000145d7008bd <0> ffff880006e98010 ffff880006d08010 0000000000000296 ffff88000786c640 <0> ffffffff81002956 0000000000000000 ffff8800071f4680 ffff8800071f4680 Call Trace: [] ? ring_buffer_read_page+0x15a/0x24a [] ? return_to_handler+0x15/0x2f [] tracing_buffers_read+0xb9/0x164 [] vfs_read+0xaf/0x150 [] return_to_handler+0x0/0x2f [] __bad_area_nosemaphore+0x17e/0x1a1 [] return_to_handler+0x0/0x2f [] bad_area_nosemaphore+0x13/0x15 Code: 80 25 b2 16 b3 00 fe c9 c3 55 48 89 e5 f0 80 0d a4 16 b3 00 02 c9 c3 55 31 c0 48 89 e5 48 83 3d 94 16 b3 00 01 c9 0f 94 c0 c3 55 <8a> 0f 48 89 e5 83 e1 1f b8 08 00 00 00 0f b6 d1 83 fa 1e 74 27 RIP [] rb_event_length+0x1/0x3f RSP CR2: ffff880006e99000 ---[ end trace a6877bb92ccb36bb ]--- The root cause is that ring_buffer_read_page() may read out of page boundary, because the boundary checking is done after reading. This is fixed via doing boundary checking before reading. Reported-by: Shaohua Li Cc: Signed-off-by: Huang Ying LKML-Reference: <1280297641.2771.307.camel@yhuang-dev> Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1da7b6e..5ec8f1d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3868,6 +3868,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer, rpos = reader->read; pos += size; + if (rpos >= commit) + break; + event = rb_reader_event(cpu_buffer); size = rb_event_length(event); } while (len > size); -- cgit v1.1 From 33659ebbae262228eef4e0fe990f393d1f0ed941 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 7 Aug 2010 18:17:56 +0200 Subject: block: remove wrappers for request type/flags Remove all the trivial wrappers for the cmd_type and cmd_flags fields in struct requests. This allows much easier grepping for different request types instead of unwinding through macros. 
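The conversion itself is mechanical, as the blktrace hunks below show; in sketch form (the handle_pc() call is a placeholder, only the field tests are the point):

  /* old, via wrapper */
  if (blk_pc_request(rq))
          handle_pc(rq);

  /* new, open-coded and greppable */
  if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
          handle_pc(rq);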
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- kernel/trace/blktrace.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 638711c..4f14994 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -661,10 +661,10 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, if (likely(!bt)) return; - if (blk_discard_rq(rq)) + if (rq->cmd_flags & REQ_DISCARD) rw |= (1 << BIO_RW_DISCARD); - if (blk_pc_request(rq)) { + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { what |= BLK_TC_ACT(BLK_TC_PC); __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw, what, rq->errors, rq->cmd_len, rq->cmd); @@ -925,7 +925,7 @@ void blk_add_driver_data(struct request_queue *q, if (likely(!bt)) return; - if (blk_pc_request(rq)) + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, BLK_TA_DRV_DATA, rq->errors, len, data); else @@ -1730,7 +1730,7 @@ void blk_dump_cmd(char *buf, struct request *rq) int len = rq->cmd_len; unsigned char *cmd = rq->cmd; - if (!blk_pc_request(rq)) { + if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { buf[0] = '\0'; return; } @@ -1779,7 +1779,7 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq) int rw = rq->cmd_flags & 0x03; int bytes; - if (blk_discard_rq(rq)) + if (rq->cmd_flags & REQ_DISCARD) rw |= (1 << BIO_RW_DISCARD); bytes = blk_rq_bytes(rq); -- cgit v1.1 From 7b6d91daee5cac6402186ff224c3af39d79f4a0e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 7 Aug 2010 18:20:39 +0200 Subject: block: unify flags for struct bio and struct request Remove the current bio flags and reuse the request flags for the bio, too. This allows to more easily trace the type of I/O from the filesystem down to the block driver. There were two flags in the bio that were missing in the requests: BIO_RW_UNPLUG and BIO_RW_AHEAD. Also I've renamed two request flags that had a superflous RW in them. Note that the flags are in bio.h despite having the REQ_ name - as blkdev.h includes bio.h that is the only way to go for now. 
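A caller now builds its flags from the shared REQ_* namespace directly, as the kernel/power/block_io.c hunk below does; a minimal before/after sketch (bio allocation and setup omitted):

  /* old: bio-private bits */
  rw = WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
  /* new: the request flags, now shared with struct bio */
  rw = WRITE | REQ_SYNC | REQ_UNPLUG;
  submit_bio(rw, bio);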
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- kernel/power/block_io.c | 2 +- kernel/trace/blktrace.c | 27 +++++++++++++++------------ 2 files changed, 16 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c index 97024fd..83bbc7c 100644 --- a/kernel/power/block_io.c +++ b/kernel/power/block_io.c @@ -28,7 +28,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector, struct page *page, struct bio **bio_chain) { - const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); + const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG; struct bio *bio; bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 4f14994..3b4a695 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -169,9 +169,12 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) }; +#define BLK_TC_HARDBARRIER BLK_TC_BARRIER +#define BLK_TC_RAHEAD BLK_TC_AHEAD + /* The ilog2() calls fall out because they're constant */ -#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \ - (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name)) +#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \ + (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name)) /* * The worker for the various blk_add_trace*() types. Fills out a @@ -194,9 +197,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, return; what |= ddir_act[rw & WRITE]; - what |= MASK_TC_BIT(rw, BARRIER); - what |= MASK_TC_BIT(rw, SYNCIO); - what |= MASK_TC_BIT(rw, AHEAD); + what |= MASK_TC_BIT(rw, HARDBARRIER); + what |= MASK_TC_BIT(rw, SYNC); + what |= MASK_TC_BIT(rw, RAHEAD); what |= MASK_TC_BIT(rw, META); what |= MASK_TC_BIT(rw, DISCARD); @@ -662,7 +665,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, return; if (rq->cmd_flags & REQ_DISCARD) - rw |= (1 << BIO_RW_DISCARD); + rw |= REQ_DISCARD; if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { what |= BLK_TC_ACT(BLK_TC_PC); @@ -1755,20 +1758,20 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) if (rw & WRITE) rwbs[i++] = 'W'; - else if (rw & 1 << BIO_RW_DISCARD) + else if (rw & REQ_DISCARD) rwbs[i++] = 'D'; else if (bytes) rwbs[i++] = 'R'; else rwbs[i++] = 'N'; - if (rw & 1 << BIO_RW_AHEAD) + if (rw & REQ_RAHEAD) rwbs[i++] = 'A'; - if (rw & 1 << BIO_RW_BARRIER) + if (rw & REQ_HARDBARRIER) rwbs[i++] = 'B'; - if (rw & 1 << BIO_RW_SYNCIO) + if (rw & REQ_SYNC) rwbs[i++] = 'S'; - if (rw & 1 << BIO_RW_META) + if (rw & REQ_META) rwbs[i++] = 'M'; rwbs[i] = '\0'; @@ -1780,7 +1783,7 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq) int bytes; if (rq->cmd_flags & REQ_DISCARD) - rw |= (1 << BIO_RW_DISCARD); + rw |= REQ_DISCARD; bytes = blk_rq_bytes(rq); -- cgit v1.1 From 62c2a7d969f30163f733c81158254b3095b23e72 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 7 Jul 2010 16:51:26 +0200 Subject: block: push BKL into blktrace ioctls The blktrace driver currently needs the BKL, but we should not need to take that in the block layer, so just push it down into the driver itself. It is quite likely that the BKL is not actually required in blktrace code and could be removed in a follow-on patch. 
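The shape of the change is the usual BKL push-down: the driver's ioctl handler brackets its own work with lock_kernel()/unlock_kernel() instead of relying on the caller, roughly like this (the function names are placeholders; the real hunk is in blk_trace_ioctl() below):

  int foo_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
  {
          int ret;

          lock_kernel();
          ret = do_foo_trace_ioctl(bdev, cmd, arg);   /* unchanged body */
          unlock_kernel();
          return ret;
  }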
Signed-off-by: Arnd Bergmann Acked-by: Christoph Hellwig Signed-off-by: Jens Axboe --- kernel/trace/blktrace.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 3b4a695..82499a5 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -552,6 +552,41 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, } EXPORT_SYMBOL_GPL(blk_trace_setup); +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +static int compat_blk_trace_setup(struct request_queue *q, char *name, + dev_t dev, struct block_device *bdev, + char __user *arg) +{ + struct blk_user_trace_setup buts; + struct compat_blk_user_trace_setup cbuts; + int ret; + + if (copy_from_user(&cbuts, arg, sizeof(cbuts))) + return -EFAULT; + + buts = (struct blk_user_trace_setup) { + .act_mask = cbuts.act_mask, + .buf_size = cbuts.buf_size, + .buf_nr = cbuts.buf_nr, + .start_lba = cbuts.start_lba, + .end_lba = cbuts.end_lba, + .pid = cbuts.pid, + }; + memcpy(&buts.name, &cbuts.name, 32); + + ret = do_blk_trace_setup(q, name, dev, bdev, &buts); + if (ret) + return ret; + + if (copy_to_user(arg, &buts.name, 32)) { + blk_trace_remove(q); + return -EFAULT; + } + + return 0; +} +#endif + int blk_trace_startstop(struct request_queue *q, int start) { int ret; @@ -604,6 +639,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) if (!q) return -ENXIO; + lock_kernel(); mutex_lock(&bdev->bd_mutex); switch (cmd) { @@ -611,6 +647,12 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) bdevname(bdev, b); ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); break; +#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) + case BLKTRACESETUP32: + bdevname(bdev, b); + ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); + break; +#endif case BLKTRACESTART: start = 1; case BLKTRACESTOP: @@ -625,6 +667,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) } mutex_unlock(&bdev->bd_mutex); + unlock_kernel(); return ret; } -- cgit v1.1 From 38f51568005873a241a8ae6bf28c9ddd3f6a11ed Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Sun, 8 Aug 2010 14:24:09 +0200 Subject: workqueue: add missing __percpu markup in kernel/workqueue.c works in schecule_on_each_cpu() is a percpu pointer but was missing __percpu markup. Add it. Signed-off-by: Namhyung Kim Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9ca34cd..da6c482 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2568,7 +2568,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on); int schedule_on_each_cpu(work_func_t func) { int cpu; - struct work_struct *works; + struct work_struct __percpu *works; works = alloc_percpu(struct work_struct); if (!works) -- cgit v1.1 From f6500947a9cbb81cfa07ff344f16955d9c6ebe61 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 9 Aug 2010 11:50:34 +0200 Subject: workqueue: workqueue_cpu_callback() should be cpu_notifier instead of hotcpu_notifier Commit 6ee0578b (workqueue: mark init_workqueues as early_initcall) made workqueue SMP initialization depend on workqueue_cpu_callback(), which however was registered as hotcpu_notifier() and didn't get called if CONFIG_HOTPLUG_CPU is not set. This made gcwqs on non-boot CPUs not create their initial workers leading to boot failures. Fix it by making it a cpu_notifier. 
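The distinction is that hotcpu_notifier() compiles away to nothing when CONFIG_HOTPLUG_CPU is not set, while cpu_notifier() always registers the callback; paraphrasing the cpu.h definitions of the time (not a verbatim quote):

  #ifdef CONFIG_HOTPLUG_CPU
  #define hotcpu_notifier(fn, pri)        cpu_notifier(fn, pri)
  #else
  #define hotcpu_notifier(fn, pri)        do { (void)(fn); } while (0)
  #endif

so on !CONFIG_HOTPLUG_CPU kernels the early-initcall boot path never saw the callback at all.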
Signed-off-by: Tejun Heo Reported-and-bisected-by: walt Tested-by: Markus Trippelsdorf --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index da6c482..2994a0e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3527,7 +3527,7 @@ static int __init init_workqueues(void) unsigned int cpu; int i; - hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); + cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ for_each_gcwq_cpu(cpu) { -- cgit v1.1 From ebabe9a9001af0af56c0c2780ca1576246e7a74b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 7 Jul 2010 18:53:11 +0200 Subject: pass a struct path to vfs_statfs We'll need the path to implement the flags field for statvfs support. We do have it available in all callers except: - ecryptfs_statfs. This one doesn't actually need vfs_statfs but just needs to do a caller to the lower filesystem statfs method. - sys_ustat. Add a non-exported statfs_by_dentry helper for it which doesn't won't be able to fill out the flags field later on. In addition rename the helpers for statfs vs fstatfs to do_*statfs instead of the misleading vfs prefix. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- kernel/acct.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/acct.c b/kernel/acct.c index 385b884..fa7eb3d 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -122,7 +122,7 @@ static int check_free_space(struct bsd_acct_struct *acct, struct file *file) spin_unlock(&acct_lock); /* May block */ - if (vfs_statfs(file->f_path.dentry, &sbuf)) + if (vfs_statfs(&file->f_path, &sbuf)) return res; suspend = sbuf.f_blocks * SUSPEND; resume = sbuf.f_blocks * RESUME; -- cgit v1.1 From 8e4228e1edb922afa83366803a1e5b3fa8e987c2 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 9 Aug 2010 17:18:56 -0700 Subject: oom: move sysctl declarations to oom.h The three oom killer sysctl variables (sysctl_oom_dump_tasks, sysctl_oom_kill_allocating_task, and sysctl_panic_on_oom) are better declared in include/linux/oom.h rather than kernel/sysctl.c. Signed-off-by: David Rientjes Acked-by: KOSAKI Motohiro Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6d850bf..6b005e4 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -85,9 +86,6 @@ /* External variables not in a header file. */ extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; -extern int sysctl_panic_on_oom; -extern int sysctl_oom_kill_allocating_task; -extern int sysctl_oom_dump_tasks; extern int max_threads; extern int core_uses_pid; extern int suid_dumpable; -- cgit v1.1 From a63d83f427fbce97a6cea0db2e64b0eb8435cd10 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 9 Aug 2010 17:19:46 -0700 Subject: oom: badness heuristic rewrite This a complete rewrite of the oom killer's badness() heuristic which is used to determine which task to kill in oom conditions. The goal is to make it as simple and predictable as possible so the results are better understood and we end up killing the task which will lead to the most memory freeing while still respecting the fine-tuning from userspace. 
Instead of basing the heuristic on mm->total_vm for each task, the task's rss and swap space is used instead. This is a better indication of the amount of memory that will be freeable if the oom killed task is chosen and subsequently exits. This helps specifically in cases where KDE or GNOME is chosen for oom kill on desktop systems instead of a memory hogging task. The baseline for the heuristic is a proportion of memory that each task is currently using in memory plus swap compared to the amount of "allowable" memory. "Allowable," in this sense, means the system-wide resources for unconstrained oom conditions, the set of mempolicy nodes, the mems attached to current's cpuset, or a memory controller's limit. The proportion is given on a scale of 0 (never kill) to 1000 (always kill), roughly meaning that if a task has a badness() score of 500 that the task consumes approximately 50% of allowable memory resident in RAM or in swap space. The proportion is always relative to the amount of "allowable" memory and not the total amount of RAM systemwide so that mempolicies and cpusets may operate in isolation; they shall not need to know the true size of the machine on which they are running if they are bound to a specific set of nodes or mems, respectively. Root tasks are given 3% extra memory just like __vm_enough_memory() provides in LSMs. In the event of two tasks consuming similar amounts of memory, it is generally better to save root's task. Because of the change in the badness() heuristic's baseline, it is also necessary to introduce a new user interface to tune it. It's not possible to redefine the meaning of /proc/pid/oom_adj with a new scale since the ABI cannot be changed for backward compatability. Instead, a new tunable, /proc/pid/oom_score_adj, is added that ranges from -1000 to +1000. It may be used to polarize the heuristic such that certain tasks are never considered for oom kill while others may always be considered. The value is added directly into the badness() score so a value of -500, for example, means to discount 50% of its memory consumption in comparison to other tasks either on the system, bound to the mempolicy, in the cpuset, or sharing the same memory controller. /proc/pid/oom_adj is changed so that its meaning is rescaled into the units used by /proc/pid/oom_score_adj, and vice versa. Changing one of these per-task tunables will rescale the value of the other to an equivalent meaning. Although /proc/pid/oom_adj was originally defined as a bitshift on the badness score, it now shares the same linear growth as /proc/pid/oom_score_adj but with different granularity. This is required so the ABI is not broken with userspace applications and allows oom_adj to be deprecated for future removal. 
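As a rough worked example of the new scale, with invented numbers and the 3% root bonus ignored: on an unconstrained machine whose allowable memory is 4 GB of RAM plus swap, a task with 1 GB resident in RAM and swap scores about

  badness ~ (rss + swap) / allowable * 1000 + oom_score_adj
          ~ (1 GB / 4 GB) * 1000 + 0 = 250

and writing -250 to its /proc/pid/oom_score_adj brings that back to roughly 0, i.e. about as unlikely to be picked as a task using almost no memory, while +750 would push it toward the "always kill" end of the range.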
Signed-off-by: David Rientjes Cc: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: KOSAKI Motohiro Cc: Oleg Nesterov Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index a82a65c..98b4508 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -899,6 +899,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) tty_audit_fork(sig); sig->oom_adj = current->signal->oom_adj; + sig->oom_score_adj = current->signal->oom_score_adj; return 0; } -- cgit v1.1 From d2997b1042ec150616c1963b5e5e919ffd0b0ebf Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Mon, 9 Aug 2010 17:20:11 -0700 Subject: hibernation: freeze swap at hibernation When taking a memory snapshot in hibernate_snapshot(), all (directly called) memory allocations use GFP_ATOMIC. Hence swap misusage during hibernation never occurs. But from a pessimistic point of view, there is no guarantee that no page allcation has __GFP_WAIT. It is better to have a global indication "we enter hibernation, don't use swap!". This patch tries to freeze new-swap-allocation during hibernation. (All user processes are frozenm so swapin is not a concern). This way, no updates will happen to swap_map[] between hibernate_snapshot() and save_image(). Swap is thawed when swsusp_free() is called. We can be assured that swap corruption will not occur. Signed-off-by: KAMEZAWA Hiroyuki Cc: "Rafael J. Wysocki" Cc: Hugh Dickins Cc: KOSAKI Motohiro Cc: Ondrej Zary Cc: Balbir Singh Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/hibernate.c | 1 + kernel/power/snapshot.c | 1 + kernel/power/swap.c | 6 +++--- 3 files changed, 5 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 8dc31e0..c779639 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -338,6 +338,7 @@ int hibernation_snapshot(int platform_mode) goto Close; suspend_console(); + hibernation_freeze_swap(); saved_mask = clear_gfp_allowed_mask(GFP_IOFS); error = dpm_suspend_start(PMSG_FREEZE); if (error) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index f6cd6fa..5e7edfb 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1086,6 +1086,7 @@ void swsusp_free(void) buffer = NULL; alloc_normal = 0; alloc_highmem = 0; + hibernation_thaw_swap(); } /* Helper functions used for the shrinking of memory. 
*/ diff --git a/kernel/power/swap.c b/kernel/power/swap.c index e6a5bdf..5d0059e 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap) { unsigned long offset; - offset = swp_offset(get_swap_page_of_type(swap)); + offset = swp_offset(get_swap_for_hibernation(swap)); if (offset) { if (swsusp_extents_insert(offset)) - swap_free(swp_entry(swap, offset)); + swap_free_for_hibernation(swp_entry(swap, offset)); else return swapdev_block(swap, offset); } @@ -163,7 +163,7 @@ void free_all_swap_pages(int swap) ext = container_of(node, struct swsusp_extent, node); rb_erase(node, &swsusp_extents); for (offset = ext->start; offset <= ext->end; offset++) - swap_free(swp_entry(swap, offset)); + swap_free_for_hibernation(swp_entry(swap, offset)); kfree(ext); } -- cgit v1.1 From 2ee7c922f20c96dba56fd378a466790d87f42e84 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 9 Aug 2010 17:20:30 -0700 Subject: sys_personality: remove the bogus checks in sys_personality()->__set_personality() path Cleanup, no functional changes. - __set_personality() always changes ->exec_domain/personality, the special case when ->exec_domain remains the same buys nothing but complicates the code. Unify both cases to simplify the code. - The -EINVAL check in sys_personality() was never right. If we assume that set_personality() can fail we should check the value it returns instead of verifying that task->personality was actually changed. Remove it. Before the previous patch it was possible to hit this case due to overflow problems, but this -EINVAL just indicated the kernel bug. OTOH, probably it makes sense to change lookup_exec_domain() to return ERR_PTR() instead of default_exec_domain if the search in exec_domains list fails, and report this error to the user-space. But this means another user-space change, and we have in-kernel users which need fixes. For example, PER_OSF4 falls into PER_MASK for unkown reason and nobody cares to register this domain. Signed-off-by: Oleg Nesterov Cc: Wenming Zhang Cc: "H. 
Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exec_domain.c | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c index dd62f8e..0dbeae3 100644 --- a/kernel/exec_domain.c +++ b/kernel/exec_domain.c @@ -134,23 +134,14 @@ unregister: return 0; } -int -__set_personality(unsigned int personality) +int __set_personality(unsigned int personality) { - struct exec_domain *ep, *oep; - - ep = lookup_exec_domain(personality); - if (ep == current_thread_info()->exec_domain) { - current->personality = personality; - module_put(ep->module); - return 0; - } + struct exec_domain *oep = current_thread_info()->exec_domain; + current_thread_info()->exec_domain = lookup_exec_domain(personality); current->personality = personality; - oep = current_thread_info()->exec_domain; - current_thread_info()->exec_domain = ep; - module_put(oep->module); + return 0; } @@ -192,11 +183,8 @@ SYSCALL_DEFINE1(personality, unsigned int, personality) { unsigned int old = current->personality; - if (personality != 0xffffffff) { + if (personality != 0xffffffff) set_personality(personality); - if (current->personality != personality) - return -EINVAL; - } return old; } -- cgit v1.1 From 459b37d423104f00e87d1934821bc8739979d0e4 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 9 Aug 2010 17:20:31 -0700 Subject: kernel/range: remove unused definition of ARRAY_SIZE() Remove duplicate definition of ARRAY_SIZE(), which was never used anyway. Signed-off-by: Geert Uytterhoeven Cc: Yinghai Lu Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/range.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'kernel') diff --git a/kernel/range.c b/kernel/range.c index 74e2e61..471b66a 100644 --- a/kernel/range.c +++ b/kernel/range.c @@ -7,10 +7,6 @@ #include -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - int add_range(struct range *range, int az, int nr_range, u64 start, u64 end) { if (start >= end) -- cgit v1.1 From 878ae1274944908e1863b06b03a2c94907afaa20 Mon Sep 17 00:00:00 2001 From: Richard Kennedy Date: Mon, 9 Aug 2010 17:20:34 -0700 Subject: stop_machine: struct cpu_stopper, remove alignment padding on 64 bits Reorder elements in structure cpu_stopper to remove alignment padding on 64 bit builds, this shrinks its size from 40 to 32 bytes saving 8 bytes per cpu. Signed-off-by: Richard Kennedy Acked-by: Tejun Heo Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/stop_machine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 70f8d90..4372ccb 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -35,9 +35,9 @@ struct cpu_stop_done { /* the actual stopper, one per every possible cpu, enabled on online cpus */ struct cpu_stopper { spinlock_t lock; + bool enabled; /* is this stopper enabled? */ struct list_head works; /* list of pending works */ struct task_struct *thread; /* stopper thread */ - bool enabled; /* is this stopper enabled? 
*/ }; static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper); -- cgit v1.1 From 8c4af38e9b2c2a78369c4e2e5706fe539ac64eb2 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 9 Aug 2010 17:20:36 -0700 Subject: gcc-4.6: printk: use stable variable to dump kmsg buffer kmsg_dump takes care to sample the global variables inside a spinlock, but then goes on to use the same variables outside the spinlock region too. Use the correct variable. This will make the race window smaller. Found by gcc 4.6's new warnings. Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 4ab0164..8fe465a 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1549,9 +1549,9 @@ void kmsg_dump(enum kmsg_dump_reason reason) chars = logged_chars; spin_unlock_irqrestore(&logbuf_lock, flags); - if (logged_chars > end) { - s1 = log_buf + log_buf_len - logged_chars + end; - l1 = logged_chars - end; + if (chars > end) { + s1 = log_buf + log_buf_len - chars + end; + l1 = chars - end; s2 = log_buf; l2 = end; @@ -1559,8 +1559,8 @@ void kmsg_dump(enum kmsg_dump_reason reason) s1 = ""; l1 = 0; - s2 = log_buf + end - logged_chars; - l2 = logged_chars; + s2 = log_buf + end - chars; + l2 = chars; } if (!spin_trylock_irqsave(&dump_list_lock, flags)) { -- cgit v1.1 From 0caa621065b2cc05d4e53655a34fd989f500b040 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 9 Aug 2010 16:32:50 -0700 Subject: kernel/timer.c: fix kernel-doc function parameter warning Fix kernel-doc warning, add @timer description: Warning(kernel/timer.c:335): No description found for parameter 'timer' Signed-off-by: Randy Dunlap Cc: Thomas Gleixner Signed-off-by: Linus Torvalds --- kernel/timer.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index f1b8afe..97bf05b 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -326,6 +326,7 @@ EXPORT_SYMBOL_GPL(round_jiffies_up_relative); /** * set_timer_slack - set the allowed slack for a timer + * @timer: the timer to be modified * @slack_hz: the amount of time (in jiffies) allowed for rounding * * Set the amount of time, in jiffies, that a certain timer has -- cgit v1.1 From f7ad3c6be90809b53b7f0ae9d4eaa45ce2564a79 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 10 Aug 2010 11:41:36 +0200 Subject: vfs: add helpers to get root and pwd Add three helpers that retrieve a refcounted copy of the root and cwd from the supplied fs_struct. 
get_fs_root() get_fs_pwd() get_fs_root_and_pwd() Signed-off-by: Miklos Szeredi Signed-off-by: Al Viro --- kernel/auditsc.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/auditsc.c b/kernel/auditsc.c index b87a63b..1b31c13 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1835,13 +1835,8 @@ void __audit_getname(const char *name) context->names[context->name_count].ino = (unsigned long)-1; context->names[context->name_count].osid = 0; ++context->name_count; - if (!context->pwd.dentry) { - read_lock(¤t->fs->lock); - context->pwd = current->fs->pwd; - path_get(¤t->fs->pwd); - read_unlock(¤t->fs->lock); - } - + if (!context->pwd.dentry) + get_fs_pwd(current->fs, &context->pwd); } /* audit_putname - intercept a putname request -- cgit v1.1 From 2e9fb9953df91ef6310da22182ca8f4496907502 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Aug 2010 23:04:10 -0600 Subject: params: don't hand NULL values to param.set callbacks. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An audit by Dongdong Deng revealed that most driver-author-written param calls don't handle val == NULL (which happens when parameters are specified with no =, eg "foo" instead of "foo=1"). The only real case to use this is boolean, so handle it specially for that case and remove a source of bugs for everyone else. Signed-off-by: Rusty Russell Cc: Dongdong Deng Cc: Américo Wang --- kernel/params.c | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/params.c b/kernel/params.c index 0b30ecd..3c4a9f1 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -58,6 +58,9 @@ static int parse_one(char *param, /* Find parameter */ for (i = 0; i < num_params; i++) { if (parameq(param, params[i].name)) { + /* Noone handled NULL, so do it here. */ + if (!val && params[i].set != param_set_bool) + return -EINVAL; DEBUGP("They are equal! Calling %p\n", params[i].set); return params[i].set(val, ¶ms[i]); @@ -181,7 +184,6 @@ int parse_args(const char *name, tmptype l; \ int ret; \ \ - if (!val) return -EINVAL; \ ret = strtolfn(val, 0, &l); \ if (ret == -EINVAL || ((type)l != l)) \ return -EINVAL; \ @@ -203,12 +205,6 @@ STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, strict_strtoul); int param_set_charp(const char *val, struct kernel_param *kp) { - if (!val) { - printk(KERN_ERR "%s: string parameter expected\n", - kp->name); - return -EINVAL; - } - if (strlen(val) > 1024) { printk(KERN_ERR "%s: string parameter too long\n", kp->name); @@ -309,12 +305,6 @@ static int param_array(const char *name, kp.arg = elem; kp.flags = flags; - /* No equals sign? */ - if (!val) { - printk(KERN_ERR "%s: expects arguments\n", name); - return -EINVAL; - } - *num = 0; /* We expect a comma-separated list of values. */ do { @@ -381,10 +371,6 @@ int param_set_copystring(const char *val, struct kernel_param *kp) { const struct kparam_string *kps = kp->str; - if (!val) { - printk(KERN_ERR "%s: missing param set value\n", kp->name); - return -EINVAL; - } if (strlen(val)+1 > kps->maxlen) { printk(KERN_ERR "%s: string doesn't fit in %u chars.\n", kp->name, kps->maxlen-1); -- cgit v1.1 From a14fe249a8f74269c9e636bcbaa78f5bdb354ce3 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Aug 2010 23:04:11 -0600 Subject: param: move the EXPORT_SYMBOL to after the definitions. This is modern style, and good to do before we start changing things. 
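The style in question simply keeps each export next to the definition it belongs to, as the hunks below do for the param accessors; in sketch form:

  int param_get_bool(char *buffer, struct kernel_param *kp)
  {
          ...
  }
  EXPORT_SYMBOL(param_get_bool);  /* right here, not in a block at the end of the file */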
Signed-off-by: Rusty Russell Reviewed-by: Takashi Iwai Tested-by: Phil Carmody --- kernel/params.c | 39 +++++++++++++-------------------------- 1 file changed, 13 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/params.c b/kernel/params.c index 3c4a9f1..3e78fdb 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -193,7 +193,9 @@ int parse_args(const char *name, int param_get_##name(char *buffer, struct kernel_param *kp) \ { \ return sprintf(buffer, format, *((type *)kp->arg)); \ - } + } \ + EXPORT_SYMBOL(param_set_##name); \ + EXPORT_SYMBOL(param_get_##name) STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, strict_strtoul); STANDARD_PARAM_DEF(short, short, "%hi", long, strict_strtol); @@ -222,11 +224,13 @@ int param_set_charp(const char *val, struct kernel_param *kp) return 0; } +EXPORT_SYMBOL(param_set_charp); int param_get_charp(char *buffer, struct kernel_param *kp) { return sprintf(buffer, "%s", *((char **)kp->arg)); } +EXPORT_SYMBOL(param_get_charp); /* Actually could be a bool or an int, for historical reasons. */ int param_set_bool(const char *val, struct kernel_param *kp) @@ -254,6 +258,7 @@ int param_set_bool(const char *val, struct kernel_param *kp) *(int *)kp->arg = v; return 0; } +EXPORT_SYMBOL(param_set_bool); int param_get_bool(char *buffer, struct kernel_param *kp) { @@ -266,6 +271,7 @@ int param_get_bool(char *buffer, struct kernel_param *kp) /* Y and N chosen as being relatively non-coder friendly */ return sprintf(buffer, "%c", val ? 'Y' : 'N'); } +EXPORT_SYMBOL(param_get_bool); /* This one must be bool. */ int param_set_invbool(const char *val, struct kernel_param *kp) @@ -281,11 +287,13 @@ int param_set_invbool(const char *val, struct kernel_param *kp) *(bool *)kp->arg = !boolval; return ret; } +EXPORT_SYMBOL(param_set_invbool); int param_get_invbool(char *buffer, struct kernel_param *kp) { return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y'); } +EXPORT_SYMBOL(param_get_invbool); /* We break the rule and mangle the string. 
*/ static int param_array(const char *name, @@ -346,6 +354,7 @@ int param_array_set(const char *val, struct kernel_param *kp) arr->elemsize, arr->set, kp->flags, arr->num ?: &temp_num); } +EXPORT_SYMBOL(param_array_set); int param_array_get(char *buffer, struct kernel_param *kp) { @@ -366,6 +375,7 @@ int param_array_get(char *buffer, struct kernel_param *kp) buffer[off] = '\0'; return off; } +EXPORT_SYMBOL(param_array_get); int param_set_copystring(const char *val, struct kernel_param *kp) { @@ -379,12 +389,14 @@ int param_set_copystring(const char *val, struct kernel_param *kp) strcpy(kps->string, val); return 0; } +EXPORT_SYMBOL(param_set_copystring); int param_get_string(char *buffer, struct kernel_param *kp) { const struct kparam_string *kps = kp->str; return strlcpy(buffer, kps->string, kps->maxlen); } +EXPORT_SYMBOL(param_get_string); /* sysfs output in /sys/modules/XYZ/parameters/ */ #define to_module_attr(n) container_of(n, struct module_attribute, attr) @@ -754,28 +766,3 @@ static int __init param_sysfs_init(void) subsys_initcall(param_sysfs_init); #endif /* CONFIG_SYSFS */ - -EXPORT_SYMBOL(param_set_byte); -EXPORT_SYMBOL(param_get_byte); -EXPORT_SYMBOL(param_set_short); -EXPORT_SYMBOL(param_get_short); -EXPORT_SYMBOL(param_set_ushort); -EXPORT_SYMBOL(param_get_ushort); -EXPORT_SYMBOL(param_set_int); -EXPORT_SYMBOL(param_get_int); -EXPORT_SYMBOL(param_set_uint); -EXPORT_SYMBOL(param_get_uint); -EXPORT_SYMBOL(param_set_long); -EXPORT_SYMBOL(param_get_long); -EXPORT_SYMBOL(param_set_ulong); -EXPORT_SYMBOL(param_get_ulong); -EXPORT_SYMBOL(param_set_charp); -EXPORT_SYMBOL(param_get_charp); -EXPORT_SYMBOL(param_set_bool); -EXPORT_SYMBOL(param_get_bool); -EXPORT_SYMBOL(param_set_invbool); -EXPORT_SYMBOL(param_get_invbool); -EXPORT_SYMBOL(param_array_set); -EXPORT_SYMBOL(param_array_get); -EXPORT_SYMBOL(param_set_copystring); -EXPORT_SYMBOL(param_get_string); -- cgit v1.1 From 9bbb9e5a33109b2832e2e63dcc7a132924ab374b Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Aug 2010 23:04:12 -0600 Subject: param: use ops in struct kernel_param, rather than get and set fns directly This is more kernel-ish, saves some space, and also allows us to expand the ops without breaking all the callers who are happy for the new members to be NULL. The few places which defined their own param types are changed to the new scheme (more which crept in recently fixed in following patches). Since we're touching them anyway, we change get() and set() to take a const struct kernel_param (which they really are). This causes some harmless warnings until we fix them (in following patches). To reduce churn, module_param_call creates the ops struct so the callers don't have to change (and casts the functions to reduce warnings). The modern version which takes an ops struct is called module_param_cb. Signed-off-by: Rusty Russell Reviewed-by: Takashi Iwai Tested-by: Phil Carmody Cc: "David S. Miller" Cc: Ville Syrjala Cc: Dmitry Torokhov Cc: Alessandro Rubini Cc: Michal Januszewski Cc: Trond Myklebust Cc: "J. 
Bruce Fields" Cc: Neil Brown Cc: linux-kernel@vger.kernel.org Cc: linux-input@vger.kernel.org Cc: linux-fbdev-devel@lists.sourceforge.net Cc: linux-nfs@vger.kernel.org Cc: netdev@vger.kernel.org --- kernel/params.c | 90 +++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 62 insertions(+), 28 deletions(-) (limited to 'kernel') diff --git a/kernel/params.c b/kernel/params.c index 3e78fdb..a550698 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -59,11 +59,11 @@ static int parse_one(char *param, for (i = 0; i < num_params; i++) { if (parameq(param, params[i].name)) { /* Noone handled NULL, so do it here. */ - if (!val && params[i].set != param_set_bool) + if (!val && params[i].ops->set != param_set_bool) return -EINVAL; DEBUGP("They are equal! Calling %p\n", - params[i].set); - return params[i].set(val, ¶ms[i]); + params[i].ops->set); + return params[i].ops->set(val, ¶ms[i]); } } @@ -179,7 +179,7 @@ int parse_args(const char *name, /* Lazy bastard, eh? */ #define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn) \ - int param_set_##name(const char *val, struct kernel_param *kp) \ + int param_set_##name(const char *val, const struct kernel_param *kp) \ { \ tmptype l; \ int ret; \ @@ -190,12 +190,18 @@ int parse_args(const char *name, *((type *)kp->arg) = l; \ return 0; \ } \ - int param_get_##name(char *buffer, struct kernel_param *kp) \ + int param_get_##name(char *buffer, const struct kernel_param *kp) \ { \ return sprintf(buffer, format, *((type *)kp->arg)); \ } \ + struct kernel_param_ops param_ops_##name = { \ + .set = param_set_##name, \ + .get = param_get_##name, \ + }; \ EXPORT_SYMBOL(param_set_##name); \ - EXPORT_SYMBOL(param_get_##name) + EXPORT_SYMBOL(param_get_##name); \ + EXPORT_SYMBOL(param_ops_##name) + STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, strict_strtoul); STANDARD_PARAM_DEF(short, short, "%hi", long, strict_strtol); @@ -205,7 +211,7 @@ STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, strict_strtoul); STANDARD_PARAM_DEF(long, long, "%li", long, strict_strtol); STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, strict_strtoul); -int param_set_charp(const char *val, struct kernel_param *kp) +int param_set_charp(const char *val, const struct kernel_param *kp) { if (strlen(val) > 1024) { printk(KERN_ERR "%s: string parameter too long\n", @@ -226,14 +232,20 @@ int param_set_charp(const char *val, struct kernel_param *kp) } EXPORT_SYMBOL(param_set_charp); -int param_get_charp(char *buffer, struct kernel_param *kp) +int param_get_charp(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "%s", *((char **)kp->arg)); } EXPORT_SYMBOL(param_get_charp); +struct kernel_param_ops param_ops_charp = { + .set = param_set_charp, + .get = param_get_charp, +}; +EXPORT_SYMBOL(param_ops_charp); + /* Actually could be a bool or an int, for historical reasons. 
*/ -int param_set_bool(const char *val, struct kernel_param *kp) +int param_set_bool(const char *val, const struct kernel_param *kp) { bool v; @@ -260,7 +272,7 @@ int param_set_bool(const char *val, struct kernel_param *kp) } EXPORT_SYMBOL(param_set_bool); -int param_get_bool(char *buffer, struct kernel_param *kp) +int param_get_bool(char *buffer, const struct kernel_param *kp) { bool val; if (kp->flags & KPARAM_ISBOOL) @@ -273,8 +285,14 @@ int param_get_bool(char *buffer, struct kernel_param *kp) } EXPORT_SYMBOL(param_get_bool); +struct kernel_param_ops param_ops_bool = { + .set = param_set_bool, + .get = param_get_bool, +}; +EXPORT_SYMBOL(param_ops_bool); + /* This one must be bool. */ -int param_set_invbool(const char *val, struct kernel_param *kp) +int param_set_invbool(const char *val, const struct kernel_param *kp) { int ret; bool boolval; @@ -289,18 +307,24 @@ int param_set_invbool(const char *val, struct kernel_param *kp) } EXPORT_SYMBOL(param_set_invbool); -int param_get_invbool(char *buffer, struct kernel_param *kp) +int param_get_invbool(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y'); } EXPORT_SYMBOL(param_get_invbool); +struct kernel_param_ops param_ops_invbool = { + .set = param_set_invbool, + .get = param_get_invbool, +}; +EXPORT_SYMBOL(param_ops_invbool); + /* We break the rule and mangle the string. */ static int param_array(const char *name, const char *val, unsigned int min, unsigned int max, void *elem, int elemsize, - int (*set)(const char *, struct kernel_param *kp), + int (*set)(const char *, const struct kernel_param *kp), u16 flags, unsigned int *num) { @@ -345,18 +369,17 @@ static int param_array(const char *name, return 0; } -int param_array_set(const char *val, struct kernel_param *kp) +static int param_array_set(const char *val, const struct kernel_param *kp) { const struct kparam_array *arr = kp->arr; unsigned int temp_num; return param_array(kp->name, val, 1, arr->max, arr->elem, - arr->elemsize, arr->set, kp->flags, + arr->elemsize, arr->ops->set, kp->flags, arr->num ?: &temp_num); } -EXPORT_SYMBOL(param_array_set); -int param_array_get(char *buffer, struct kernel_param *kp) +static int param_array_get(char *buffer, const struct kernel_param *kp) { int i, off, ret; const struct kparam_array *arr = kp->arr; @@ -367,7 +390,7 @@ int param_array_get(char *buffer, struct kernel_param *kp) if (i) buffer[off++] = ','; p.arg = arr->elem + arr->elemsize * i; - ret = arr->get(buffer + off, &p); + ret = arr->ops->get(buffer + off, &p); if (ret < 0) return ret; off += ret; @@ -375,9 +398,14 @@ int param_array_get(char *buffer, struct kernel_param *kp) buffer[off] = '\0'; return off; } -EXPORT_SYMBOL(param_array_get); -int param_set_copystring(const char *val, struct kernel_param *kp) +struct kernel_param_ops param_array_ops = { + .set = param_array_set, + .get = param_array_get, +}; +EXPORT_SYMBOL(param_array_ops); + +int param_set_copystring(const char *val, const struct kernel_param *kp) { const struct kparam_string *kps = kp->str; @@ -391,13 +419,19 @@ int param_set_copystring(const char *val, struct kernel_param *kp) } EXPORT_SYMBOL(param_set_copystring); -int param_get_string(char *buffer, struct kernel_param *kp) +int param_get_string(char *buffer, const struct kernel_param *kp) { const struct kparam_string *kps = kp->str; return strlcpy(buffer, kps->string, kps->maxlen); } EXPORT_SYMBOL(param_get_string); +struct kernel_param_ops param_ops_string = { + .set = param_set_copystring, + .get = 
param_get_string, +}; +EXPORT_SYMBOL(param_ops_string); + /* sysfs output in /sys/modules/XYZ/parameters/ */ #define to_module_attr(n) container_of(n, struct module_attribute, attr) #define to_module_kobject(n) container_of(n, struct module_kobject, kobj) @@ -407,7 +441,7 @@ extern struct kernel_param __start___param[], __stop___param[]; struct param_attribute { struct module_attribute mattr; - struct kernel_param *param; + const struct kernel_param *param; }; struct module_param_attrs @@ -426,10 +460,10 @@ static ssize_t param_attr_show(struct module_attribute *mattr, int count; struct param_attribute *attribute = to_param_attr(mattr); - if (!attribute->param->get) + if (!attribute->param->ops->get) return -EPERM; - count = attribute->param->get(buf, attribute->param); + count = attribute->param->ops->get(buf, attribute->param); if (count > 0) { strcat(buf, "\n"); ++count; @@ -445,10 +479,10 @@ static ssize_t param_attr_store(struct module_attribute *mattr, int err; struct param_attribute *attribute = to_param_attr(mattr); - if (!attribute->param->set) + if (!attribute->param->ops->set) return -EPERM; - err = attribute->param->set(buf, attribute->param); + err = attribute->param->ops->set(buf, attribute->param); if (!err) return len; return err; @@ -473,7 +507,7 @@ static ssize_t param_attr_store(struct module_attribute *mattr, * if there's an error. */ static __modinit int add_sysfs_param(struct module_kobject *mk, - struct kernel_param *kp, + const struct kernel_param *kp, const char *name) { struct module_param_attrs *new; @@ -555,7 +589,7 @@ static void free_module_param_attrs(struct module_kobject *mk) * /sys/module/[mod->name]/parameters/ */ int module_param_sysfs_setup(struct module *mod, - struct kernel_param *kparam, + const struct kernel_param *kparam, unsigned int num_params) { int i, err; -- cgit v1.1 From e6df34a4429b77fdffb6e05adf263468a3dcda33 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Aug 2010 23:04:17 -0600 Subject: param: add a free hook to kernel_param_ops. This allows us to generalize the KPARAM_KMALLOCED flag, by calling a function on every parameter when a module is unloaded. Signed-off-by: Rusty Russell Reviewed-by: Takashi Iwai Tested-by: Phil Carmody --- kernel/params.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/params.c b/kernel/params.c index a550698..458a09b 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -399,9 +399,20 @@ static int param_array_get(char *buffer, const struct kernel_param *kp) return off; } +static void param_array_free(void *arg) +{ + unsigned int i; + const struct kparam_array *arr = arg; + + if (arr->ops->free) + for (i = 0; i < (arr->num ? *arr->num : arr->max); i++) + arr->ops->free(arr->elem + arr->elemsize * i); +} + struct kernel_param_ops param_array_ops = { .set = param_array_set, .get = param_array_get, + .free = param_array_free, }; EXPORT_SYMBOL(param_array_ops); @@ -634,7 +645,11 @@ void module_param_sysfs_remove(struct module *mod) void destroy_params(const struct kernel_param *params, unsigned num) { - /* FIXME: This should free kmalloced charp parameters. It doesn't. 
*/ + unsigned int i; + + for (i = 0; i < num; i++) + if (params[i].ops->free) + params[i].ops->free(params[i].arg); } static void __init kernel_add_sysfs_param(const char *name, -- cgit v1.1 From a1054322afc8120ea5a50bc84e5beeda54571862 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Aug 2010 23:04:18 -0600 Subject: param: use free hook for charp (fix leak of charp parameters) Instead of using a "I kmalloced this" flag, we keep track of the kmalloced strings and use that list to check if we need to kfree (in practice, the list is very short). This means that kparams can be const again, and plugs a leak. This is important for drivers/usb/gadget/nokia.c which gets modprobe/rmmod'ed frequently on the N9000. Signed-off-by: Rusty Russell Reviewed-by: Takashi Iwai Cc: Artem Bityutskiy Tested-by: Phil Carmody --- kernel/params.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/params.c b/kernel/params.c index 458a09b..ef60db1 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -31,6 +31,45 @@ #define DEBUGP(fmt, a...) #endif +/* This just allows us to keep track of which parameters are kmalloced. */ +struct kmalloced_param { + struct list_head list; + char val[]; +}; +static DEFINE_MUTEX(param_lock); +static LIST_HEAD(kmalloced_params); + +static void *kmalloc_parameter(unsigned int size) +{ + struct kmalloced_param *p; + + p = kmalloc(sizeof(*p) + size, GFP_KERNEL); + if (!p) + return NULL; + + mutex_lock(¶m_lock); + list_add(&p->list, &kmalloced_params); + mutex_unlock(¶m_lock); + + return p->val; +} + +/* Does nothing if parameter wasn't kmalloced above. */ +static void maybe_kfree_parameter(void *param) +{ + struct kmalloced_param *p; + + mutex_lock(¶m_lock); + list_for_each_entry(p, &kmalloced_params, list) { + if (p->val == param) { + list_del(&p->list); + kfree(p); + break; + } + } + mutex_unlock(¶m_lock); +} + static inline char dash2underscore(char c) { if (c == '-') @@ -219,12 +258,15 @@ int param_set_charp(const char *val, const struct kernel_param *kp) return -ENOSPC; } - /* This is a hack. We can't need to strdup in early boot, and we + maybe_kfree_parameter(*(char **)kp->arg); + + /* This is a hack. We can't kmalloc in early boot, and we * don't need to; this mangled commandline is preserved. */ if (slab_is_available()) { - *(char **)kp->arg = kstrdup(val, GFP_KERNEL); + *(char **)kp->arg = kmalloc_parameter(strlen(val)+1); if (!*(char **)kp->arg) return -ENOMEM; + strcpy(*(char **)kp->arg, val); } else *(const char **)kp->arg = val; @@ -238,9 +280,15 @@ int param_get_charp(char *buffer, const struct kernel_param *kp) } EXPORT_SYMBOL(param_get_charp); +static void param_free_charp(void *arg) +{ + maybe_kfree_parameter(*((char **)arg)); +} + struct kernel_param_ops param_ops_charp = { .set = param_set_charp, .get = param_get_charp, + .free = param_free_charp, }; EXPORT_SYMBOL(param_ops_charp); -- cgit v1.1 From 914dcaa84c53f2c3efa6016efcae13fd92a8a17c Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Aug 2010 23:04:18 -0600 Subject: param: make param sections const. Since this section can be read-only (they're in .rodata), they should always have been const. Minor flow-through various functions. 
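To make the "flow-through" concrete: once the __param section is const, every function that only inspects the table should take a pointer to const, as parse_one() and parse_args() do after this patch. A minimal sketch under that assumption (find_param() is a hypothetical helper, not in the patch; parameq() is the matching helper params.c already uses):

    static const struct kernel_param *find_param(const struct kernel_param *params,
                                                 unsigned int num_params,
                                                 const char *name)
    {
            unsigned int i;

            /* Same matching rule as parse_one(): '-' and '_' are equivalent. */
            for (i = 0; i < num_params; i++)
                    if (parameq(name, params[i].name))
                            return &params[i];
            return NULL;
    }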
Signed-off-by: Rusty Russell Tested-by: Phil Carmody --- kernel/params.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/params.c b/kernel/params.c index ef60db1..a3eeeefc 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -88,7 +88,7 @@ static inline int parameq(const char *input, const char *paramname) static int parse_one(char *param, char *val, - struct kernel_param *params, + const struct kernel_param *params, unsigned num_params, int (*handle_unknown)(char *param, char *val)) { @@ -170,7 +170,7 @@ static char *next_arg(char *args, char **param, char **val) /* Args looks like "foo=bar,bar2 baz=fuz wiz". */ int parse_args(const char *name, char *args, - struct kernel_param *params, + const struct kernel_param *params, unsigned num, int (*unknown)(char *param, char *val)) { -- cgit v1.1 From 907b29eb41aa604477a655bff7345731da94514d Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Aug 2010 23:04:19 -0600 Subject: param: locking for kernel parameters There may be cases (most obviously, sysfs-writable charp parameters) where a module needs to prevent sysfs access to parameters. Rather than express this in terms of a big lock, the functions are expressed in terms of what they protect against. This is clearer, esp. if the implementation changes to a module-level or even param-level lock. Signed-off-by: Rusty Russell Reviewed-by: Takashi Iwai Tested-by: Phil Carmody --- kernel/params.c | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/params.c b/kernel/params.c index a3eeeefc..08107d1 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -31,12 +31,14 @@ #define DEBUGP(fmt, a...) #endif +/* Protects all parameters, and incidentally kmalloced_param list. */ +static DEFINE_MUTEX(param_lock); + /* This just allows us to keep track of which parameters are kmalloced. */ struct kmalloced_param { struct list_head list; char val[]; }; -static DEFINE_MUTEX(param_lock); static LIST_HEAD(kmalloced_params); static void *kmalloc_parameter(unsigned int size) @@ -47,10 +49,7 @@ static void *kmalloc_parameter(unsigned int size) if (!p) return NULL; - mutex_lock(¶m_lock); list_add(&p->list, &kmalloced_params); - mutex_unlock(¶m_lock); - return p->val; } @@ -59,7 +58,6 @@ static void maybe_kfree_parameter(void *param) { struct kmalloced_param *p; - mutex_lock(¶m_lock); list_for_each_entry(p, &kmalloced_params, list) { if (p->val == param) { list_del(&p->list); @@ -67,7 +65,6 @@ static void maybe_kfree_parameter(void *param) break; } } - mutex_unlock(¶m_lock); } static inline char dash2underscore(char c) @@ -93,6 +90,7 @@ static int parse_one(char *param, int (*handle_unknown)(char *param, char *val)) { unsigned int i; + int err; /* Find parameter */ for (i = 0; i < num_params; i++) { @@ -102,7 +100,10 @@ static int parse_one(char *param, return -EINVAL; DEBUGP("They are equal! 
Calling %p\n", params[i].ops->set); - return params[i].ops->set(val, ¶ms[i]); + mutex_lock(¶m_lock); + err = params[i].ops->set(val, ¶ms[i]); + mutex_unlock(¶m_lock); + return err; } } @@ -400,6 +401,7 @@ static int param_array(const char *name, /* nul-terminate and parse */ save = val[len]; ((char *)val)[len] = '\0'; + BUG_ON(!mutex_is_locked(¶m_lock)); ret = set(val, &kp); if (ret != 0) @@ -438,6 +440,7 @@ static int param_array_get(char *buffer, const struct kernel_param *kp) if (i) buffer[off++] = ','; p.arg = arr->elem + arr->elemsize * i; + BUG_ON(!mutex_is_locked(¶m_lock)); ret = arr->ops->get(buffer + off, &p); if (ret < 0) return ret; @@ -522,7 +525,9 @@ static ssize_t param_attr_show(struct module_attribute *mattr, if (!attribute->param->ops->get) return -EPERM; + mutex_lock(¶m_lock); count = attribute->param->ops->get(buf, attribute->param); + mutex_unlock(¶m_lock); if (count > 0) { strcat(buf, "\n"); ++count; @@ -541,7 +546,9 @@ static ssize_t param_attr_store(struct module_attribute *mattr, if (!attribute->param->ops->set) return -EPERM; + mutex_lock(¶m_lock); err = attribute->param->ops->set(buf, attribute->param); + mutex_unlock(¶m_lock); if (!err) return len; return err; @@ -555,6 +562,18 @@ static ssize_t param_attr_store(struct module_attribute *mattr, #endif #ifdef CONFIG_SYSFS +void __kernel_param_lock(void) +{ + mutex_lock(¶m_lock); +} +EXPORT_SYMBOL(__kernel_param_lock); + +void __kernel_param_unlock(void) +{ + mutex_unlock(¶m_lock); +} +EXPORT_SYMBOL(__kernel_param_unlock); + /* * add_sysfs_param - add a parameter to sysfs * @mk: struct module_kobject -- cgit v1.1 From e400c28524af2d344b1663b27bf28984fa959a0e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 10 Aug 2010 18:02:54 -0700 Subject: cgroups: save space for the terminator The original code didn't leave enough space for a NULL terminator. These strings are copied with strcpy() into fixed length buffers in cgroup_root_from_opts(). Signed-off-by: Dan Carpenter Acked-by: Serge E. Hallyn Reviewd-by: KAMEZAWA Hiroyuki Cc: Paul Menage Cc: Li Zefan Cc: Ben Blum Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index d83cab0..192f88c 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1102,7 +1102,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) if (opts->release_agent) return -EINVAL; opts->release_agent = - kstrndup(token + 14, PATH_MAX, GFP_KERNEL); + kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL); if (!opts->release_agent) return -ENOMEM; } else if (!strncmp(token, "name=", 5)) { @@ -1123,7 +1123,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) if (opts->name) return -EINVAL; opts->name = kstrndup(name, - MAX_CGROUP_ROOT_NAMELEN, + MAX_CGROUP_ROOT_NAMELEN - 1, GFP_KERNEL); if (!opts->name) return -ENOMEM; -- cgit v1.1 From c7e49c1488ab20342eaaf38f1ca35a207f4c051d Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 10 Aug 2010 18:03:07 -0700 Subject: ptrace: optimize exit_ptrace() for the likely case exit_ptrace() takes tasklist_lock unconditionally. We need this lock to avoid the race with ptrace_traceme(), it acts as a barrier. Change its caller, forget_original_parent(), to call exit_ptrace() under tasklist_lock. Change exit_ptrace() to drop and reacquire this lock if needed. This allows us to add the fastpath list_empty(ptraced) check. 
In the likely no-tracees case exit_ptrace() just returns and we avoid the lock() + unlock() sequence. "Zhang, Yanmin" suggested to add this check, and he reports that this change adds about 11% improvement in some tests. Suggested-and-tested-by: "Zhang, Yanmin" Signed-off-by: Oleg Nesterov Acked-by: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 7 +++++-- kernel/ptrace.c | 12 +++++++++--- 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index ceffc67..671ed56 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -771,9 +771,12 @@ static void forget_original_parent(struct task_struct *father) struct task_struct *p, *n, *reaper; LIST_HEAD(dead_children); - exit_ptrace(father); - write_lock_irq(&tasklist_lock); + /* + * Note that exit_ptrace() and find_new_reaper() might + * drop tasklist_lock and reacquire it. + */ + exit_ptrace(father); reaper = find_new_reaper(father); list_for_each_entry_safe(p, n, &father->children, sibling) { diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 74a3d69..f34d798 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -324,26 +324,32 @@ int ptrace_detach(struct task_struct *child, unsigned int data) } /* - * Detach all tasks we were using ptrace on. + * Detach all tasks we were using ptrace on. Called with tasklist held + * for writing, and returns with it held too. But note it can release + * and reacquire the lock. */ void exit_ptrace(struct task_struct *tracer) { struct task_struct *p, *n; LIST_HEAD(ptrace_dead); - write_lock_irq(&tasklist_lock); + if (likely(list_empty(&tracer->ptraced))) + return; + list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) { if (__ptrace_detach(tracer, p)) list_add(&p->ptrace_entry, &ptrace_dead); } - write_unlock_irq(&tasklist_lock); + write_unlock_irq(&tasklist_lock); BUG_ON(!list_empty(&tracer->ptraced)); list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) { list_del_init(&p->ptrace_entry); release_task(p); } + + write_lock_irq(&tasklist_lock); } int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) -- cgit v1.1 From 5fdee8c4a5e1800489ce61963208f8cc55e42ea1 Mon Sep 17 00:00:00 2001 From: Salman Date: Tue, 10 Aug 2010 18:03:16 -0700 Subject: pids: fix a race in pid generation that causes pids to be reused immediately A program that repeatedly forks and waits is susceptible to having the same pid repeated, especially when it competes with another instance of the same program. This is really bad for bash implementation. Furthermore, many shell scripts assume that pid numbers will not be used for some length of time. Race Description: A B // pid == offset == n // pid == offset == n + 1 test_and_set_bit(offset, map->page) test_and_set_bit(offset, map->page); pid_ns->last_pid = pid; pid_ns->last_pid = pid; // pid == n + 1 is freed (wait()) // Next fork()... last = pid_ns->last_pid; // == n pid = last + 1; Code to reproduce it (Running multiple instances is more effective): #include #include #include #include #include #include // The distance mod 32768 between two pids, where the first pid is expected // to be smaller than the second. 
int PidDistance(pid_t first, pid_t second) { return (second + 32768 - first) % 32768; } int main(int argc, char* argv[]) { int failed = 0; pid_t last_pid = 0; int i; printf("%d\n", sizeof(pid_t)); for (i = 0; i < 10000000; ++i) { if (i % 32786 == 0) printf("Iter: %d\n", i/32768); int child_exit_code = i % 256; pid_t pid = fork(); if (pid == -1) { fprintf(stderr, "fork failed, iteration %d, errno=%d", i, errno); exit(1); } if (pid == 0) { // Child exit(child_exit_code); } else { // Parent if (i > 0) { int distance = PidDistance(last_pid, pid); if (distance == 0 || distance > 30000) { fprintf(stderr, "Unexpected pid sequence: previous fork: pid=%d, " "current fork: pid=%d for iteration=%d.\n", last_pid, pid, i); failed = 1; } } last_pid = pid; int status; int reaped = wait(&status); if (reaped != pid) { fprintf(stderr, "Wait return value: expected pid=%d, " "got %d, iteration %d\n", pid, reaped, i); failed = 1; } else if (WEXITSTATUS(status) != child_exit_code) { fprintf(stderr, "Unexpected exit status %x, iteration %d\n", WEXITSTATUS(status), i); failed = 1; } } } exit(failed); } Thanks to Ted Tso for the key ideas of this implementation. Signed-off-by: Salman Qazi Cc: Ingo Molnar Cc: Theodore Ts'o Cc: Peter Zijlstra Cc: Sukadev Bhattiprolu Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/pid.c | 39 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/pid.c b/kernel/pid.c index e9fd8c1..fbbd5f6 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -122,6 +122,43 @@ static void free_pidmap(struct upid *upid) atomic_inc(&map->nr_free); } +/* + * If we started walking pids at 'base', is 'a' seen before 'b'? + */ +static int pid_before(int base, int a, int b) +{ + /* + * This is the same as saying + * + * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT + * and that mapping orders 'a' and 'b' with respect to 'base'. + */ + return (unsigned)(a - base) < (unsigned)(b - base); +} + +/* + * We might be racing with someone else trying to set pid_ns->last_pid. + * We want the winner to have the "later" value, because if the + * "earlier" value prevails, then a pid may get reused immediately. + * + * Since pids rollover, it is not sufficient to just pick the bigger + * value. We have to consider where we started counting from. + * + * 'base' is the value of pid_ns->last_pid that we observed when + * we started looking for a pid. + * + * 'pid' is the pid that we eventually found. + */ +static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid) +{ + int prev; + int last_write = base; + do { + prev = last_write; + last_write = cmpxchg(&pid_ns->last_pid, prev, pid); + } while ((prev != last_write) && (pid_before(base, last_write, pid))); +} + static int alloc_pidmap(struct pid_namespace *pid_ns) { int i, offset, max_scan, pid, last = pid_ns->last_pid; @@ -154,7 +191,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) do { if (!test_and_set_bit(offset, map->page)) { atomic_dec(&map->nr_free); - pid_ns->last_pid = pid; + set_last_pid(pid_ns, last, pid); return pid; } offset = find_next_offset(map, offset); -- cgit v1.1 From c52b0b91ba1f4b7ea90e20385c0a6df0ba54aed4 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 10 Aug 2010 18:03:17 -0700 Subject: pids: alloc_pidmap: remove the unnecessary boundary checks alloc_pidmap() calculates max_scan so that if the initial offset != 0 we inspect the first map->page twice. 
This is correct, we want to find the unused bits < offset in this bitmap block. Add the comment. But it doesn't make any sense to stop the find_next_offset() loop when we are looking into this map->page for the second time. We have already already checked the bits >= offset during the first attempt, it is fine to do this again, no matter if we succeed this time or not. Remove this hard-to-understand code. It optimizes the very unlikely case when we are going to fail, but slows down the more likely case. Signed-off-by: Oleg Nesterov Cc: Salman Qazi Cc: Ingo Molnar Cc: Sukadev Bhattiprolu Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/pid.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/pid.c b/kernel/pid.c index fbbd5f6..d55c6fb 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -169,7 +169,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) pid = RESERVED_PIDS; offset = pid & BITS_PER_PAGE_MASK; map = &pid_ns->pidmap[pid/BITS_PER_PAGE]; - max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset; + /* + * If last_pid points into the middle of the map->page we + * want to scan this bitmap block twice, the second time + * we start with offset == 0 (or RESERVED_PIDS). + */ + max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset; for (i = 0; i <= max_scan; ++i) { if (unlikely(!map->page)) { void *page = kzalloc(PAGE_SIZE, GFP_KERNEL); @@ -196,15 +201,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) } offset = find_next_offset(map, offset); pid = mk_pid(pid_ns, map, offset); - /* - * find_next_offset() found a bit, the pid from it - * is in-bounds, and if we fell back to the last - * bitmap block and the final block was the same - * as the starting point, pid is before last_pid. - */ - } while (offset < BITS_PER_PAGE && pid < pid_max && - (i != max_scan || pid < last || - !((last+1) & BITS_PER_PAGE_MASK))); + } while (offset < BITS_PER_PAGE && pid < pid_max); } if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) { ++map; -- cgit v1.1 From c7ff0d9c92435e836e13aaa8d0e56d4000424bcc Mon Sep 17 00:00:00 2001 From: TAMUKI Shoichi Date: Tue, 10 Aug 2010 18:03:28 -0700 Subject: panic: keep blinking in spite of long spin timer mode To keep panic_timeout accuracy when running under a hypervisor, the current implementation only spins on long time (1 second) calls to mdelay. That brings a good effect, but the problem is the keyboard LEDs don't blink at all on that situation. This patch changes to call to panic_blink_enter() between every mdelay and keeps blinking in spite of long spin timer mode. The time to call to mdelay is now 100ms. Even this change will keep panic_timeout accuracy enough when running under a hypervisor. 
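The driver side of the new interface stays small. A hedged sketch of a blink hook under the new prototype (example_set_led() stands in for whatever LED access a real driver has): the callback only drives the LED to the requested on/off state and returns the milliseconds it spent, while panic() itself paces the loop in PANIC_TIMER_STEP (100 ms) mdelay() chunks and toggles the state every 3600 / PANIC_BLINK_SPD, i.e. about 200 ms.

    /* Hypothetical driver hook for the new panic_blink prototype. */
    static long example_panic_blink(int state)
    {
            example_set_led(state);         /* 1 = LED on, 0 = LED off (assumed helper) */
            return 0;                       /* no measurable time spent here */
    }

    static int __init example_blink_init(void)
    {
            panic_blink = example_panic_blink;  /* panic() falls back to no_blink if unset */
            return 0;
    }
    late_initcall(example_blink_init);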
Signed-off-by: TAMUKI Shoichi Cc: Ben Dooks Cc: Russell King Acked-by: Dmitry Torokhov Cc: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/panic.c | 58 ++++++++++++++++++++++++++-------------------------------- 1 file changed, 26 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index 3b16cd9..3e9037a 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -24,6 +24,9 @@ #include #include +#define PANIC_TIMER_STEP 100 +#define PANIC_BLINK_SPD 18 + int panic_on_oops; static unsigned long tainted_mask; static int pause_on_oops; @@ -36,36 +39,15 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list); EXPORT_SYMBOL(panic_notifier_list); -/* Returns how long it waited in ms */ -long (*panic_blink)(long time); -EXPORT_SYMBOL(panic_blink); - -static void panic_blink_one_second(void) +static long no_blink(int state) { - static long i = 0, end; - - if (panic_blink) { - end = i + MSEC_PER_SEC; - - while (i < end) { - i += panic_blink(i); - mdelay(1); - i++; - } - } else { - /* - * When running under a hypervisor a small mdelay may get - * rounded up to the hypervisor timeslice. For example, with - * a 1ms in 10ms hypervisor timeslice we might inflate a - * mdelay(1) loop by 10x. - * - * If we have nothing to blink, spin on 1 second calls to - * mdelay to avoid this. - */ - mdelay(MSEC_PER_SEC); - } + return 0; } +/* Returns how long it waited in ms */ +long (*panic_blink)(int state); +EXPORT_SYMBOL(panic_blink); + /** * panic - halt the system * @fmt: The text string to print @@ -78,7 +60,8 @@ NORET_TYPE void panic(const char * fmt, ...) { static char buf[1024]; va_list args; - long i; + long i, i_next = 0; + int state = 0; /* * It's possible to come here directly from a panic-assertion and @@ -117,6 +100,9 @@ NORET_TYPE void panic(const char * fmt, ...) bust_spinlocks(0); + if (!panic_blink) + panic_blink = no_blink; + if (panic_timeout > 0) { /* * Delay timeout seconds before rebooting the machine. @@ -124,9 +110,13 @@ NORET_TYPE void panic(const char * fmt, ...) */ printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout); - for (i = 0; i < panic_timeout; i++) { + for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { touch_nmi_watchdog(); - panic_blink_one_second(); + if (i >= i_next) { + i += panic_blink(state ^= 1); + i_next = i + 3600 / PANIC_BLINK_SPD; + } + mdelay(PANIC_TIMER_STEP); } /* * This will not be a clean reboot, with everything @@ -152,9 +142,13 @@ NORET_TYPE void panic(const char * fmt, ...) } #endif local_irq_enable(); - while (1) { + for (i = 0; ; i += PANIC_TIMER_STEP) { touch_softlockup_watchdog(); - panic_blink_one_second(); + if (i >= i_next) { + i += panic_blink(state ^= 1); + i_next = i + 3600 / PANIC_BLINK_SPD; + } + mdelay(PANIC_TIMER_STEP); } } -- cgit v1.1 From 863a6049202412a6d655d052eb1c45ca7dd74a83 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 10 Aug 2010 18:03:30 -0700 Subject: lib/bug.c: add oops end marker to WARN implementation We are missing the oops end marker for the exception based WARN implementation in lib/bug.c. This is useful for logfile analysis tools. Signed-off-by: Anton Blanchard Cc: Ingo Molnar Cc: Arjan van de Ven Cc: "Kirill A. 
Shutemov" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/panic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index 3e9037a..4c13b1a 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -338,7 +338,7 @@ static int init_oops_id(void) } late_initcall(init_oops_id); -static void print_oops_end_marker(void) +void print_oops_end_marker(void) { init_oops_id(); printk(KERN_WARNING "---[ end trace %016llx ]---\n", -- cgit v1.1 From f65a03f6ab6f53a6f2847dbac232dcb38b3b3642 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 10 Aug 2010 18:03:31 -0700 Subject: kexec: return -EFAULT on copy_to_user() failures copy_to/from_user() returns the number of bytes remaining to be copied. It never returns a negative value. The correct return code is -EFAULT and not -EIO. All the callers check for non-zero returns so that's Ok, but the return code is passed to the user so we should fix this. Signed-off-by: Dan Carpenter Cc: Hidetoshi Seto Cc: "Paul E. McKenney" Cc: "Eric W. Biederman" Cc: Simon Kagstrom Acked-by: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/kexec.c b/kernel/kexec.c index 131b170..c0613f7 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -151,8 +151,10 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, image->nr_segments = nr_segments; segment_bytes = nr_segments * sizeof(*segments); result = copy_from_user(image->segment, segments, segment_bytes); - if (result) + if (result) { + result = -EFAULT; goto out; + } /* * Verify we have good destination addresses. The caller is @@ -827,7 +829,7 @@ static int kimage_load_normal_segment(struct kimage *image, result = copy_from_user(ptr, buf, uchunk); kunmap(page); if (result) { - result = (result < 0) ? result : -EIO; + result = -EFAULT; goto out; } ubytes -= uchunk; @@ -882,7 +884,7 @@ static int kimage_load_crash_segment(struct kimage *image, kexec_flush_icache_page(page); kunmap(page); if (result) { - result = (result < 0) ? result : -EIO; + result = -EFAULT; goto out; } ubytes -= uchunk; -- cgit v1.1 From 4201d9a8e86b51dd40aa8a0dabd093376c859985 Mon Sep 17 00:00:00 2001 From: Stefani Seibold Date: Tue, 10 Aug 2010 18:03:38 -0700 Subject: kfifo: add the new generic kfifo API Add the new version of the kfifo API files kfifo.c and kfifo.h. Signed-off-by: Stefani Seibold Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kfifo-new.c | 602 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 602 insertions(+) create mode 100644 kernel/kfifo-new.c (limited to 'kernel') diff --git a/kernel/kfifo-new.c b/kernel/kfifo-new.c new file mode 100644 index 0000000..02192dd --- /dev/null +++ b/kernel/kfifo-new.c @@ -0,0 +1,602 @@ +/* + * A generic kernel FIFO implementation + * + * Copyright (C) 2009/2010 Stefani Seibold + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * internal helper to calculate the unused elements in a fifo + */ +static inline unsigned int kfifo_unused(struct __kfifo *fifo) +{ + return (fifo->mask + 1) - (fifo->in - fifo->out); +} + +int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, + size_t esize, gfp_t gfp_mask) +{ + /* + * round down to the next power of 2, since our 'let the indices + * wrap' technique works only in this case. + */ + if (!is_power_of_2(size)) + size = rounddown_pow_of_two(size); + + fifo->in = 0; + fifo->out = 0; + fifo->esize = esize; + + if (size < 2) { + fifo->data = NULL; + fifo->mask = 0; + return -EINVAL; + } + + fifo->data = kmalloc(size * esize, gfp_mask); + + if (!fifo->data) { + fifo->mask = 0; + return -ENOMEM; + } + fifo->mask = size - 1; + + return 0; +} +EXPORT_SYMBOL(__kfifo_alloc); + +void __kfifo_free(struct __kfifo *fifo) +{ + kfree(fifo->data); + fifo->in = 0; + fifo->out = 0; + fifo->esize = 0; + fifo->data = NULL; + fifo->mask = 0; +} +EXPORT_SYMBOL(__kfifo_free); + +int __kfifo_init(struct __kfifo *fifo, void *buffer, + unsigned int size, size_t esize) +{ + size /= esize; + + if (!is_power_of_2(size)) + size = rounddown_pow_of_two(size); + + fifo->in = 0; + fifo->out = 0; + fifo->esize = esize; + fifo->data = buffer; + + if (size < 2) { + fifo->mask = 0; + return -EINVAL; + } + fifo->mask = size - 1; + + return 0; +} +EXPORT_SYMBOL(__kfifo_init); + +static void kfifo_copy_in(struct __kfifo *fifo, const void *src, + unsigned int len, unsigned int off) +{ + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + unsigned int l; + + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(fifo->data + off, src, l); + memcpy(fifo->data, src + l, len - l); + /* + * make sure that the data in the fifo is up to date before + * incrementing the fifo->in index counter + */ + smp_wmb(); +} + +unsigned int __kfifo_in(struct __kfifo *fifo, + const void *buf, unsigned int len) +{ + unsigned int l; + + l = kfifo_unused(fifo); + if (len > l) + len = l; + + kfifo_copy_in(fifo, buf, len, fifo->in); + fifo->in += len; + return len; +} +EXPORT_SYMBOL(__kfifo_in); + +static void kfifo_copy_out(struct __kfifo *fifo, void *dst, + unsigned int len, unsigned int off) +{ + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + unsigned int l; + + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(dst, fifo->data + off, l); + memcpy(dst + l, fifo->data, len - l); + /* + * make sure that the data is copied before + * incrementing the fifo->out index counter + */ + smp_wmb(); +} + +unsigned int __kfifo_out_peek(struct __kfifo *fifo, + void *buf, unsigned int len) +{ + unsigned int l; + + l = fifo->in - fifo->out; + if (len > l) + len = l; + + kfifo_copy_out(fifo, buf, len, fifo->out); + return len; +} +EXPORT_SYMBOL(__kfifo_out_peek); + +unsigned int __kfifo_out(struct __kfifo *fifo, + void *buf, unsigned int len) +{ + len = __kfifo_out_peek(fifo, buf, len); + fifo->out += len; + return len; +} +EXPORT_SYMBOL(__kfifo_out); + +static unsigned long kfifo_copy_from_user(struct __kfifo *fifo, + const void __user *from, unsigned int len, 
unsigned int off, + unsigned int *copied) +{ + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + unsigned int l; + unsigned long ret; + + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + ret = copy_from_user(fifo->data + off, from, l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret + len - l, esize); + else { + ret = copy_from_user(fifo->data, from + l, len - l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret, esize); + } + /* + * make sure that the data in the fifo is up to date before + * incrementing the fifo->in index counter + */ + smp_wmb(); + *copied = len - ret; + /* return the number of elements which are not copied */ + return ret; +} + +int __kfifo_from_user(struct __kfifo *fifo, const void __user *from, + unsigned long len, unsigned int *copied) +{ + unsigned int l; + unsigned long ret; + unsigned int esize = fifo->esize; + int err; + + if (esize != 1) + len /= esize; + + l = kfifo_unused(fifo); + if (len > l) + len = l; + + ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied); + if (unlikely(ret)) { + len -= ret; + err = -EFAULT; + } else + err = 0; + fifo->in += len; + return err; +} +EXPORT_SYMBOL(__kfifo_from_user); + +static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to, + unsigned int len, unsigned int off, unsigned int *copied) +{ + unsigned int l; + unsigned long ret; + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + ret = copy_to_user(to, fifo->data + off, l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret + len - l, esize); + else { + ret = copy_to_user(to + l, fifo->data, len - l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret, esize); + } + /* + * make sure that the data is copied before + * incrementing the fifo->out index counter + */ + smp_wmb(); + *copied = len - ret; + /* return the number of elements which are not copied */ + return ret; +} + +int __kfifo_to_user(struct __kfifo *fifo, void __user *to, + unsigned long len, unsigned int *copied) +{ + unsigned int l; + unsigned long ret; + unsigned int esize = fifo->esize; + int err; + + if (esize != 1) + len /= esize; + + l = fifo->in - fifo->out; + if (len > l) + len = l; + ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied); + if (unlikely(ret)) { + len -= ret; + err = -EFAULT; + } else + err = 0; + fifo->out += len; + return err; +} +EXPORT_SYMBOL(__kfifo_to_user); + +static int setup_sgl_buf(struct scatterlist *sgl, void *buf, + int nents, unsigned int len) +{ + int n; + unsigned int l; + unsigned int off; + struct page *page; + + if (!nents) + return 0; + + if (!len) + return 0; + + n = 0; + page = virt_to_page(buf); + off = offset_in_page(buf); + l = 0; + + while (len >= l + PAGE_SIZE - off) { + struct page *npage; + + l += PAGE_SIZE; + buf += PAGE_SIZE; + npage = virt_to_page(buf); + if (page_to_phys(page) != page_to_phys(npage) - l) { + sgl->page_link = 0; + sg_set_page(sgl++, page, l - off, off); + if (++n == nents) + return n; + page = npage; + len -= l - off; + l = off = 0; + } + } + sgl->page_link = 0; + sg_set_page(sgl++, page, len, off); + return n + 1; +} + +static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, + int nents, unsigned int len, unsigned int off) +{ + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + unsigned int l; + unsigned int n; + + off &= 
fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + n = setup_sgl_buf(sgl, fifo->data + off, nents, l); + n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); + + if (n) + sg_mark_end(sgl + n - 1); + return n; +} + +unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len) +{ + unsigned int l; + + l = kfifo_unused(fifo); + if (len > l) + len = l; + + return setup_sgl(fifo, sgl, nents, len, fifo->in); +} +EXPORT_SYMBOL(__kfifo_dma_in_prepare); + +unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len) +{ + unsigned int l; + + l = fifo->in - fifo->out; + if (len > l) + len = l; + + return setup_sgl(fifo, sgl, nents, len, fifo->out); +} +EXPORT_SYMBOL(__kfifo_dma_out_prepare); + +unsigned int __kfifo_max_r(unsigned int len, size_t recsize) +{ + unsigned int max = (1 << (recsize << 3)) - 1; + + if (len > max) + return max; + return len; +} + +#define __KFIFO_PEEK(data, out, mask) \ + ((data)[(out) & (mask)]) +/* + * __kfifo_peek_n internal helper function for determinate the length of + * the next record in the fifo + */ +static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize) +{ + unsigned int l; + unsigned int mask = fifo->mask; + unsigned char *data = fifo->data; + + l = __KFIFO_PEEK(data, fifo->out, mask); + + if (--recsize) + l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8; + + return l; +} + +#define __KFIFO_POKE(data, in, mask, val) \ + ( \ + (data)[(in) & (mask)] = (unsigned char)(val) \ + ) + +/* + * __kfifo_poke_n internal helper function for storeing the length of + * the record into the fifo + */ +static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize) +{ + unsigned int mask = fifo->mask; + unsigned char *data = fifo->data; + + __KFIFO_POKE(data, fifo->in, mask, n); + + if (recsize > 1) + __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8); +} + +unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize) +{ + return __kfifo_peek_n(fifo, recsize); +} +EXPORT_SYMBOL(__kfifo_len_r); + +unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf, + unsigned int len, size_t recsize) +{ + if (len + recsize > kfifo_unused(fifo)) + return 0; + + __kfifo_poke_n(fifo, len, recsize); + + kfifo_copy_in(fifo, buf, len, fifo->in + recsize); + fifo->in += len + recsize; + return len; +} +EXPORT_SYMBOL(__kfifo_in_r); + +static unsigned int kfifo_out_copy_r(struct __kfifo *fifo, + void *buf, unsigned int len, size_t recsize, unsigned int *n) +{ + *n = __kfifo_peek_n(fifo, recsize); + + if (len > *n) + len = *n; + + kfifo_copy_out(fifo, buf, len, fifo->out + recsize); + return len; +} + +unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf, + unsigned int len, size_t recsize) +{ + unsigned int n; + + if (fifo->in == fifo->out) + return 0; + + return kfifo_out_copy_r(fifo, buf, len, recsize, &n); +} +EXPORT_SYMBOL(__kfifo_out_peek_r); + +unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf, + unsigned int len, size_t recsize) +{ + unsigned int n; + + if (fifo->in == fifo->out) + return 0; + + len = kfifo_out_copy_r(fifo, buf, len, recsize, &n); + fifo->out += n + recsize; + return len; +} +EXPORT_SYMBOL(__kfifo_out_r); + +int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from, + unsigned long len, unsigned int *copied, size_t recsize) +{ + unsigned long ret; + + len = __kfifo_max_r(len, recsize); + + if (len + recsize > 
kfifo_unused(fifo)) { + *copied = 0; + return 0; + } + + __kfifo_poke_n(fifo, len, recsize); + + ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied); + if (unlikely(ret)) { + *copied = 0; + return -EFAULT; + } + fifo->in += len + recsize; + return 0; +} +EXPORT_SYMBOL(__kfifo_from_user_r); + +int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, + unsigned long len, unsigned int *copied, size_t recsize) +{ + unsigned long ret; + unsigned int n; + + if (fifo->in == fifo->out) { + *copied = 0; + return 0; + } + + n = __kfifo_peek_n(fifo, recsize); + if (len > n) + len = n; + + ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied); + if (unlikely(ret)) { + *copied = 0; + return -EFAULT; + } + fifo->out += n + recsize; + return 0; +} +EXPORT_SYMBOL(__kfifo_to_user_r); + +unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) +{ + if (!nents) + BUG(); + + len = __kfifo_max_r(len, recsize); + + if (len + recsize > kfifo_unused(fifo)) + return 0; + + return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize); +} +EXPORT_SYMBOL(__kfifo_dma_in_prepare_r); + +void __kfifo_dma_in_finish_r(struct __kfifo *fifo, + unsigned int len, size_t recsize) +{ + len = __kfifo_max_r(len, recsize); + __kfifo_poke_n(fifo, len, recsize); + fifo->in += len + recsize; +} +EXPORT_SYMBOL(__kfifo_dma_in_finish_r); + +unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) +{ + if (!nents) + BUG(); + + len = __kfifo_max_r(len, recsize); + + if (len + recsize > fifo->in - fifo->out) + return 0; + + return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize); +} +EXPORT_SYMBOL(__kfifo_dma_out_prepare_r); + +void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize) +{ + unsigned int len; + + len = __kfifo_peek_n(fifo, recsize); + fifo->out += len + recsize; +} +EXPORT_SYMBOL(__kfifo_dma_out_finish_r); -- cgit v1.1 From 2e956fb320568cc70861761483e2f0e2db75fd66 Mon Sep 17 00:00:00 2001 From: Stefani Seibold Date: Tue, 10 Aug 2010 18:03:38 -0700 Subject: kfifo: replace the old non generic API Simply replace the whole kfifo.c and kfifo.h files with the new generic version and fix the kerneldoc API template file. Signed-off-by: Stefani Seibold Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kfifo-new.c | 602 ------------------------------------------ kernel/kfifo.c | 749 ++++++++++++++++++++++++++++++++--------------------- 2 files changed, 453 insertions(+), 898 deletions(-) delete mode 100644 kernel/kfifo-new.c (limited to 'kernel') diff --git a/kernel/kfifo-new.c b/kernel/kfifo-new.c deleted file mode 100644 index 02192dd..0000000 --- a/kernel/kfifo-new.c +++ /dev/null @@ -1,602 +0,0 @@ -/* - * A generic kernel FIFO implementation - * - * Copyright (C) 2009/2010 Stefani Seibold - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - */ - -#include -#include -#include -#include -#include -#include -#include - -/* - * internal helper to calculate the unused elements in a fifo - */ -static inline unsigned int kfifo_unused(struct __kfifo *fifo) -{ - return (fifo->mask + 1) - (fifo->in - fifo->out); -} - -int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, - size_t esize, gfp_t gfp_mask) -{ - /* - * round down to the next power of 2, since our 'let the indices - * wrap' technique works only in this case. - */ - if (!is_power_of_2(size)) - size = rounddown_pow_of_two(size); - - fifo->in = 0; - fifo->out = 0; - fifo->esize = esize; - - if (size < 2) { - fifo->data = NULL; - fifo->mask = 0; - return -EINVAL; - } - - fifo->data = kmalloc(size * esize, gfp_mask); - - if (!fifo->data) { - fifo->mask = 0; - return -ENOMEM; - } - fifo->mask = size - 1; - - return 0; -} -EXPORT_SYMBOL(__kfifo_alloc); - -void __kfifo_free(struct __kfifo *fifo) -{ - kfree(fifo->data); - fifo->in = 0; - fifo->out = 0; - fifo->esize = 0; - fifo->data = NULL; - fifo->mask = 0; -} -EXPORT_SYMBOL(__kfifo_free); - -int __kfifo_init(struct __kfifo *fifo, void *buffer, - unsigned int size, size_t esize) -{ - size /= esize; - - if (!is_power_of_2(size)) - size = rounddown_pow_of_two(size); - - fifo->in = 0; - fifo->out = 0; - fifo->esize = esize; - fifo->data = buffer; - - if (size < 2) { - fifo->mask = 0; - return -EINVAL; - } - fifo->mask = size - 1; - - return 0; -} -EXPORT_SYMBOL(__kfifo_init); - -static void kfifo_copy_in(struct __kfifo *fifo, const void *src, - unsigned int len, unsigned int off) -{ - unsigned int size = fifo->mask + 1; - unsigned int esize = fifo->esize; - unsigned int l; - - off &= fifo->mask; - if (esize != 1) { - off *= esize; - size *= esize; - len *= esize; - } - l = min(len, size - off); - - memcpy(fifo->data + off, src, l); - memcpy(fifo->data, src + l, len - l); - /* - * make sure that the data in the fifo is up to date before - * incrementing the fifo->in index counter - */ - smp_wmb(); -} - -unsigned int __kfifo_in(struct __kfifo *fifo, - const void *buf, unsigned int len) -{ - unsigned int l; - - l = kfifo_unused(fifo); - if (len > l) - len = l; - - kfifo_copy_in(fifo, buf, len, fifo->in); - fifo->in += len; - return len; -} -EXPORT_SYMBOL(__kfifo_in); - -static void kfifo_copy_out(struct __kfifo *fifo, void *dst, - unsigned int len, unsigned int off) -{ - unsigned int size = fifo->mask + 1; - unsigned int esize = fifo->esize; - unsigned int l; - - off &= fifo->mask; - if (esize != 1) { - off *= esize; - size *= esize; - len *= esize; - } - l = min(len, size - off); - - memcpy(dst, fifo->data + off, l); - memcpy(dst + l, fifo->data, len - l); - /* - * make sure that the data is copied before - * incrementing the fifo->out index counter - */ - smp_wmb(); -} - -unsigned int __kfifo_out_peek(struct __kfifo *fifo, - void *buf, unsigned int len) -{ - unsigned int l; - - l = fifo->in - fifo->out; - if (len > l) - len = l; - - kfifo_copy_out(fifo, buf, len, fifo->out); - return len; -} -EXPORT_SYMBOL(__kfifo_out_peek); - -unsigned int __kfifo_out(struct __kfifo *fifo, - void *buf, unsigned int len) -{ - len = __kfifo_out_peek(fifo, buf, len); - fifo->out += len; - return len; -} -EXPORT_SYMBOL(__kfifo_out); - -static unsigned long kfifo_copy_from_user(struct __kfifo *fifo, - const void __user *from, unsigned int len, 
unsigned int off, - unsigned int *copied) -{ - unsigned int size = fifo->mask + 1; - unsigned int esize = fifo->esize; - unsigned int l; - unsigned long ret; - - off &= fifo->mask; - if (esize != 1) { - off *= esize; - size *= esize; - len *= esize; - } - l = min(len, size - off); - - ret = copy_from_user(fifo->data + off, from, l); - if (unlikely(ret)) - ret = DIV_ROUND_UP(ret + len - l, esize); - else { - ret = copy_from_user(fifo->data, from + l, len - l); - if (unlikely(ret)) - ret = DIV_ROUND_UP(ret, esize); - } - /* - * make sure that the data in the fifo is up to date before - * incrementing the fifo->in index counter - */ - smp_wmb(); - *copied = len - ret; - /* return the number of elements which are not copied */ - return ret; -} - -int __kfifo_from_user(struct __kfifo *fifo, const void __user *from, - unsigned long len, unsigned int *copied) -{ - unsigned int l; - unsigned long ret; - unsigned int esize = fifo->esize; - int err; - - if (esize != 1) - len /= esize; - - l = kfifo_unused(fifo); - if (len > l) - len = l; - - ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied); - if (unlikely(ret)) { - len -= ret; - err = -EFAULT; - } else - err = 0; - fifo->in += len; - return err; -} -EXPORT_SYMBOL(__kfifo_from_user); - -static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to, - unsigned int len, unsigned int off, unsigned int *copied) -{ - unsigned int l; - unsigned long ret; - unsigned int size = fifo->mask + 1; - unsigned int esize = fifo->esize; - - off &= fifo->mask; - if (esize != 1) { - off *= esize; - size *= esize; - len *= esize; - } - l = min(len, size - off); - - ret = copy_to_user(to, fifo->data + off, l); - if (unlikely(ret)) - ret = DIV_ROUND_UP(ret + len - l, esize); - else { - ret = copy_to_user(to + l, fifo->data, len - l); - if (unlikely(ret)) - ret = DIV_ROUND_UP(ret, esize); - } - /* - * make sure that the data is copied before - * incrementing the fifo->out index counter - */ - smp_wmb(); - *copied = len - ret; - /* return the number of elements which are not copied */ - return ret; -} - -int __kfifo_to_user(struct __kfifo *fifo, void __user *to, - unsigned long len, unsigned int *copied) -{ - unsigned int l; - unsigned long ret; - unsigned int esize = fifo->esize; - int err; - - if (esize != 1) - len /= esize; - - l = fifo->in - fifo->out; - if (len > l) - len = l; - ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied); - if (unlikely(ret)) { - len -= ret; - err = -EFAULT; - } else - err = 0; - fifo->out += len; - return err; -} -EXPORT_SYMBOL(__kfifo_to_user); - -static int setup_sgl_buf(struct scatterlist *sgl, void *buf, - int nents, unsigned int len) -{ - int n; - unsigned int l; - unsigned int off; - struct page *page; - - if (!nents) - return 0; - - if (!len) - return 0; - - n = 0; - page = virt_to_page(buf); - off = offset_in_page(buf); - l = 0; - - while (len >= l + PAGE_SIZE - off) { - struct page *npage; - - l += PAGE_SIZE; - buf += PAGE_SIZE; - npage = virt_to_page(buf); - if (page_to_phys(page) != page_to_phys(npage) - l) { - sgl->page_link = 0; - sg_set_page(sgl++, page, l - off, off); - if (++n == nents) - return n; - page = npage; - len -= l - off; - l = off = 0; - } - } - sgl->page_link = 0; - sg_set_page(sgl++, page, len, off); - return n + 1; -} - -static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, - int nents, unsigned int len, unsigned int off) -{ - unsigned int size = fifo->mask + 1; - unsigned int esize = fifo->esize; - unsigned int l; - unsigned int n; - - off &= 
fifo->mask; - if (esize != 1) { - off *= esize; - size *= esize; - len *= esize; - } - l = min(len, size - off); - - n = setup_sgl_buf(sgl, fifo->data + off, nents, l); - n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); - - if (n) - sg_mark_end(sgl + n - 1); - return n; -} - -unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len) -{ - unsigned int l; - - l = kfifo_unused(fifo); - if (len > l) - len = l; - - return setup_sgl(fifo, sgl, nents, len, fifo->in); -} -EXPORT_SYMBOL(__kfifo_dma_in_prepare); - -unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len) -{ - unsigned int l; - - l = fifo->in - fifo->out; - if (len > l) - len = l; - - return setup_sgl(fifo, sgl, nents, len, fifo->out); -} -EXPORT_SYMBOL(__kfifo_dma_out_prepare); - -unsigned int __kfifo_max_r(unsigned int len, size_t recsize) -{ - unsigned int max = (1 << (recsize << 3)) - 1; - - if (len > max) - return max; - return len; -} - -#define __KFIFO_PEEK(data, out, mask) \ - ((data)[(out) & (mask)]) -/* - * __kfifo_peek_n internal helper function for determinate the length of - * the next record in the fifo - */ -static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize) -{ - unsigned int l; - unsigned int mask = fifo->mask; - unsigned char *data = fifo->data; - - l = __KFIFO_PEEK(data, fifo->out, mask); - - if (--recsize) - l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8; - - return l; -} - -#define __KFIFO_POKE(data, in, mask, val) \ - ( \ - (data)[(in) & (mask)] = (unsigned char)(val) \ - ) - -/* - * __kfifo_poke_n internal helper function for storeing the length of - * the record into the fifo - */ -static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize) -{ - unsigned int mask = fifo->mask; - unsigned char *data = fifo->data; - - __KFIFO_POKE(data, fifo->in, mask, n); - - if (recsize > 1) - __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8); -} - -unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize) -{ - return __kfifo_peek_n(fifo, recsize); -} -EXPORT_SYMBOL(__kfifo_len_r); - -unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf, - unsigned int len, size_t recsize) -{ - if (len + recsize > kfifo_unused(fifo)) - return 0; - - __kfifo_poke_n(fifo, len, recsize); - - kfifo_copy_in(fifo, buf, len, fifo->in + recsize); - fifo->in += len + recsize; - return len; -} -EXPORT_SYMBOL(__kfifo_in_r); - -static unsigned int kfifo_out_copy_r(struct __kfifo *fifo, - void *buf, unsigned int len, size_t recsize, unsigned int *n) -{ - *n = __kfifo_peek_n(fifo, recsize); - - if (len > *n) - len = *n; - - kfifo_copy_out(fifo, buf, len, fifo->out + recsize); - return len; -} - -unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf, - unsigned int len, size_t recsize) -{ - unsigned int n; - - if (fifo->in == fifo->out) - return 0; - - return kfifo_out_copy_r(fifo, buf, len, recsize, &n); -} -EXPORT_SYMBOL(__kfifo_out_peek_r); - -unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf, - unsigned int len, size_t recsize) -{ - unsigned int n; - - if (fifo->in == fifo->out) - return 0; - - len = kfifo_out_copy_r(fifo, buf, len, recsize, &n); - fifo->out += n + recsize; - return len; -} -EXPORT_SYMBOL(__kfifo_out_r); - -int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from, - unsigned long len, unsigned int *copied, size_t recsize) -{ - unsigned long ret; - - len = __kfifo_max_r(len, recsize); - - if (len + recsize > 
kfifo_unused(fifo)) { - *copied = 0; - return 0; - } - - __kfifo_poke_n(fifo, len, recsize); - - ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied); - if (unlikely(ret)) { - *copied = 0; - return -EFAULT; - } - fifo->in += len + recsize; - return 0; -} -EXPORT_SYMBOL(__kfifo_from_user_r); - -int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, - unsigned long len, unsigned int *copied, size_t recsize) -{ - unsigned long ret; - unsigned int n; - - if (fifo->in == fifo->out) { - *copied = 0; - return 0; - } - - n = __kfifo_peek_n(fifo, recsize); - if (len > n) - len = n; - - ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied); - if (unlikely(ret)) { - *copied = 0; - return -EFAULT; - } - fifo->out += n + recsize; - return 0; -} -EXPORT_SYMBOL(__kfifo_to_user_r); - -unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) -{ - if (!nents) - BUG(); - - len = __kfifo_max_r(len, recsize); - - if (len + recsize > kfifo_unused(fifo)) - return 0; - - return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize); -} -EXPORT_SYMBOL(__kfifo_dma_in_prepare_r); - -void __kfifo_dma_in_finish_r(struct __kfifo *fifo, - unsigned int len, size_t recsize) -{ - len = __kfifo_max_r(len, recsize); - __kfifo_poke_n(fifo, len, recsize); - fifo->in += len + recsize; -} -EXPORT_SYMBOL(__kfifo_dma_in_finish_r); - -unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) -{ - if (!nents) - BUG(); - - len = __kfifo_max_r(len, recsize); - - if (len + recsize > fifo->in - fifo->out) - return 0; - - return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize); -} -EXPORT_SYMBOL(__kfifo_dma_out_prepare_r); - -void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize) -{ - unsigned int len; - - len = __kfifo_peek_n(fifo, recsize); - fifo->out += len + recsize; -} -EXPORT_SYMBOL(__kfifo_dma_out_finish_r); diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 35edbe22..02192dd 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c @@ -1,8 +1,7 @@ /* - * A generic kernel FIFO implementation. + * A generic kernel FIFO implementation * - * Copyright (C) 2009 Stefani Seibold - * Copyright (C) 2004 Stelian Pop + * Copyright (C) 2009/2010 Stefani Seibold * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -24,422 +23,580 @@ #include #include #include -#include #include #include +#include -static void _kfifo_init(struct kfifo *fifo, void *buffer, - unsigned int size) -{ - fifo->buffer = buffer; - fifo->size = size; - - kfifo_reset(fifo); -} - -/** - * kfifo_init - initialize a FIFO using a preallocated buffer - * @fifo: the fifo to assign the buffer - * @buffer: the preallocated buffer to be used. - * @size: the size of the internal buffer, this has to be a power of 2. - * +/* + * internal helper to calculate the unused elements in a fifo */ -void kfifo_init(struct kfifo *fifo, void *buffer, unsigned int size) +static inline unsigned int kfifo_unused(struct __kfifo *fifo) { - /* size must be a power of 2 */ - BUG_ON(!is_power_of_2(size)); - - _kfifo_init(fifo, buffer, size); + return (fifo->mask + 1) - (fifo->in - fifo->out); } -EXPORT_SYMBOL(kfifo_init); -/** - * kfifo_alloc - allocates a new FIFO internal buffer - * @fifo: the fifo to assign then new buffer - * @size: the size of the buffer to be allocated, this have to be a power of 2. 
- * @gfp_mask: get_free_pages mask, passed to kmalloc() - * - * This function dynamically allocates a new fifo internal buffer - * - * The size will be rounded-up to a power of 2. - * The buffer will be release with kfifo_free(). - * Return 0 if no error, otherwise the an error code - */ -int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask) +int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, + size_t esize, gfp_t gfp_mask) { - unsigned char *buffer; - /* - * round up to the next power of 2, since our 'let the indices + * round down to the next power of 2, since our 'let the indices * wrap' technique works only in this case. */ - if (!is_power_of_2(size)) { - BUG_ON(size > 0x80000000); - size = roundup_pow_of_two(size); + if (!is_power_of_2(size)) + size = rounddown_pow_of_two(size); + + fifo->in = 0; + fifo->out = 0; + fifo->esize = esize; + + if (size < 2) { + fifo->data = NULL; + fifo->mask = 0; + return -EINVAL; } - buffer = kmalloc(size, gfp_mask); - if (!buffer) { - _kfifo_init(fifo, NULL, 0); + fifo->data = kmalloc(size * esize, gfp_mask); + + if (!fifo->data) { + fifo->mask = 0; return -ENOMEM; } - - _kfifo_init(fifo, buffer, size); + fifo->mask = size - 1; return 0; } -EXPORT_SYMBOL(kfifo_alloc); +EXPORT_SYMBOL(__kfifo_alloc); -/** - * kfifo_free - frees the FIFO internal buffer - * @fifo: the fifo to be freed. - */ -void kfifo_free(struct kfifo *fifo) +void __kfifo_free(struct __kfifo *fifo) { - kfree(fifo->buffer); - _kfifo_init(fifo, NULL, 0); + kfree(fifo->data); + fifo->in = 0; + fifo->out = 0; + fifo->esize = 0; + fifo->data = NULL; + fifo->mask = 0; } -EXPORT_SYMBOL(kfifo_free); +EXPORT_SYMBOL(__kfifo_free); -/** - * kfifo_skip - skip output data - * @fifo: the fifo to be used. - * @len: number of bytes to skip - */ -void kfifo_skip(struct kfifo *fifo, unsigned int len) +int __kfifo_init(struct __kfifo *fifo, void *buffer, + unsigned int size, size_t esize) { - if (len < kfifo_len(fifo)) { - __kfifo_add_out(fifo, len); - return; + size /= esize; + + if (!is_power_of_2(size)) + size = rounddown_pow_of_two(size); + + fifo->in = 0; + fifo->out = 0; + fifo->esize = esize; + fifo->data = buffer; + + if (size < 2) { + fifo->mask = 0; + return -EINVAL; } - kfifo_reset_out(fifo); + fifo->mask = size - 1; + + return 0; } -EXPORT_SYMBOL(kfifo_skip); +EXPORT_SYMBOL(__kfifo_init); -static inline void __kfifo_in_data(struct kfifo *fifo, - const void *from, unsigned int len, unsigned int off) +static void kfifo_copy_in(struct __kfifo *fifo, const void *src, + unsigned int len, unsigned int off) { + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; unsigned int l; + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(fifo->data + off, src, l); + memcpy(fifo->data, src + l, len - l); /* - * Ensure that we sample the fifo->out index -before- we - * start putting bytes into the kfifo. 
+ * make sure that the data in the fifo is up to date before + * incrementing the fifo->in index counter */ + smp_wmb(); +} - smp_mb(); - - off = __kfifo_off(fifo, fifo->in + off); +unsigned int __kfifo_in(struct __kfifo *fifo, + const void *buf, unsigned int len) +{ + unsigned int l; - /* first put the data starting from fifo->in to buffer end */ - l = min(len, fifo->size - off); - memcpy(fifo->buffer + off, from, l); + l = kfifo_unused(fifo); + if (len > l) + len = l; - /* then put the rest (if any) at the beginning of the buffer */ - memcpy(fifo->buffer, from + l, len - l); + kfifo_copy_in(fifo, buf, len, fifo->in); + fifo->in += len; + return len; } +EXPORT_SYMBOL(__kfifo_in); -static inline void __kfifo_out_data(struct kfifo *fifo, - void *to, unsigned int len, unsigned int off) +static void kfifo_copy_out(struct __kfifo *fifo, void *dst, + unsigned int len, unsigned int off) { + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; unsigned int l; + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(dst, fifo->data + off, l); + memcpy(dst + l, fifo->data, len - l); /* - * Ensure that we sample the fifo->in index -before- we - * start removing bytes from the kfifo. + * make sure that the data is copied before + * incrementing the fifo->out index counter */ + smp_wmb(); +} - smp_rmb(); +unsigned int __kfifo_out_peek(struct __kfifo *fifo, + void *buf, unsigned int len) +{ + unsigned int l; - off = __kfifo_off(fifo, fifo->out + off); + l = fifo->in - fifo->out; + if (len > l) + len = l; - /* first get the data from fifo->out until the end of the buffer */ - l = min(len, fifo->size - off); - memcpy(to, fifo->buffer + off, l); + kfifo_copy_out(fifo, buf, len, fifo->out); + return len; +} +EXPORT_SYMBOL(__kfifo_out_peek); - /* then get the rest (if any) from the beginning of the buffer */ - memcpy(to + l, fifo->buffer, len - l); +unsigned int __kfifo_out(struct __kfifo *fifo, + void *buf, unsigned int len) +{ + len = __kfifo_out_peek(fifo, buf, len); + fifo->out += len; + return len; } +EXPORT_SYMBOL(__kfifo_out); -static inline int __kfifo_from_user_data(struct kfifo *fifo, - const void __user *from, unsigned int len, unsigned int off, - unsigned *lenout) +static unsigned long kfifo_copy_from_user(struct __kfifo *fifo, + const void __user *from, unsigned int len, unsigned int off, + unsigned int *copied) { + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; unsigned int l; - int ret; + unsigned long ret; + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + ret = copy_from_user(fifo->data + off, from, l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret + len - l, esize); + else { + ret = copy_from_user(fifo->data, from + l, len - l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret, esize); + } /* - * Ensure that we sample the fifo->out index -before- we - * start putting bytes into the kfifo. 
+ * make sure that the data in the fifo is up to date before + * incrementing the fifo->in index counter */ + smp_wmb(); + *copied = len - ret; + /* return the number of elements which are not copied */ + return ret; +} - smp_mb(); +int __kfifo_from_user(struct __kfifo *fifo, const void __user *from, + unsigned long len, unsigned int *copied) +{ + unsigned int l; + unsigned long ret; + unsigned int esize = fifo->esize; + int err; - off = __kfifo_off(fifo, fifo->in + off); + if (esize != 1) + len /= esize; - /* first put the data starting from fifo->in to buffer end */ - l = min(len, fifo->size - off); - ret = copy_from_user(fifo->buffer + off, from, l); - if (unlikely(ret)) { - *lenout = ret; - return -EFAULT; - } - *lenout = l; + l = kfifo_unused(fifo); + if (len > l) + len = l; - /* then put the rest (if any) at the beginning of the buffer */ - ret = copy_from_user(fifo->buffer, from + l, len - l); - *lenout += ret ? ret : len - l; - return ret ? -EFAULT : 0; + ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied); + if (unlikely(ret)) { + len -= ret; + err = -EFAULT; + } else + err = 0; + fifo->in += len; + return err; } +EXPORT_SYMBOL(__kfifo_from_user); -static inline int __kfifo_to_user_data(struct kfifo *fifo, - void __user *to, unsigned int len, unsigned int off, unsigned *lenout) +static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to, + unsigned int len, unsigned int off, unsigned int *copied) { unsigned int l; - int ret; - + unsigned long ret; + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + ret = copy_to_user(to, fifo->data + off, l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret + len - l, esize); + else { + ret = copy_to_user(to + l, fifo->data, len - l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret, esize); + } /* - * Ensure that we sample the fifo->in index -before- we - * start removing bytes from the kfifo. 
+ * make sure that the data is copied before + * incrementing the fifo->out index counter */ + smp_wmb(); + *copied = len - ret; + /* return the number of elements which are not copied */ + return ret; +} - smp_rmb(); +int __kfifo_to_user(struct __kfifo *fifo, void __user *to, + unsigned long len, unsigned int *copied) +{ + unsigned int l; + unsigned long ret; + unsigned int esize = fifo->esize; + int err; - off = __kfifo_off(fifo, fifo->out + off); + if (esize != 1) + len /= esize; - /* first get the data from fifo->out until the end of the buffer */ - l = min(len, fifo->size - off); - ret = copy_to_user(to, fifo->buffer + off, l); - *lenout = l; + l = fifo->in - fifo->out; + if (len > l) + len = l; + ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied); if (unlikely(ret)) { - *lenout -= ret; - return -EFAULT; - } + len -= ret; + err = -EFAULT; + } else + err = 0; + fifo->out += len; + return err; +} +EXPORT_SYMBOL(__kfifo_to_user); - /* then get the rest (if any) from the beginning of the buffer */ - len -= l; - ret = copy_to_user(to + l, fifo->buffer, len); - if (unlikely(ret)) { - *lenout += len - ret; - return -EFAULT; +static int setup_sgl_buf(struct scatterlist *sgl, void *buf, + int nents, unsigned int len) +{ + int n; + unsigned int l; + unsigned int off; + struct page *page; + + if (!nents) + return 0; + + if (!len) + return 0; + + n = 0; + page = virt_to_page(buf); + off = offset_in_page(buf); + l = 0; + + while (len >= l + PAGE_SIZE - off) { + struct page *npage; + + l += PAGE_SIZE; + buf += PAGE_SIZE; + npage = virt_to_page(buf); + if (page_to_phys(page) != page_to_phys(npage) - l) { + sgl->page_link = 0; + sg_set_page(sgl++, page, l - off, off); + if (++n == nents) + return n; + page = npage; + len -= l - off; + l = off = 0; + } } - *lenout += len; - return 0; + sgl->page_link = 0; + sg_set_page(sgl++, page, len, off); + return n + 1; } -unsigned int __kfifo_in_n(struct kfifo *fifo, - const void *from, unsigned int len, unsigned int recsize) +static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, + int nents, unsigned int len, unsigned int off) { - if (kfifo_avail(fifo) < len + recsize) - return len + 1; + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + unsigned int l; + unsigned int n; - __kfifo_in_data(fifo, from, len, recsize); - return 0; + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + n = setup_sgl_buf(sgl, fifo->data + off, nents, l); + n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); + + if (n) + sg_mark_end(sgl + n - 1); + return n; } -EXPORT_SYMBOL(__kfifo_in_n); -/** - * kfifo_in - puts some data into the FIFO - * @fifo: the fifo to be used. - * @from: the data to be added. - * @len: the length of the data to be added. - * - * This function copies at most @len bytes from the @from buffer into - * the FIFO depending on the free space, and returns the number of - * bytes copied. - * - * Note that with only one concurrent reader and one concurrent - * writer, you don't need extra locking to use these functions. 
- */ -unsigned int kfifo_in(struct kfifo *fifo, const void *from, - unsigned int len) +unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len) { - len = min(kfifo_avail(fifo), len); + unsigned int l; - __kfifo_in_data(fifo, from, len, 0); - __kfifo_add_in(fifo, len); - return len; + l = kfifo_unused(fifo); + if (len > l) + len = l; + + return setup_sgl(fifo, sgl, nents, len, fifo->in); } -EXPORT_SYMBOL(kfifo_in); +EXPORT_SYMBOL(__kfifo_dma_in_prepare); -unsigned int __kfifo_in_generic(struct kfifo *fifo, - const void *from, unsigned int len, unsigned int recsize) +unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len) { - return __kfifo_in_rec(fifo, from, len, recsize); + unsigned int l; + + l = fifo->in - fifo->out; + if (len > l) + len = l; + + return setup_sgl(fifo, sgl, nents, len, fifo->out); } -EXPORT_SYMBOL(__kfifo_in_generic); +EXPORT_SYMBOL(__kfifo_dma_out_prepare); -unsigned int __kfifo_out_n(struct kfifo *fifo, - void *to, unsigned int len, unsigned int recsize) +unsigned int __kfifo_max_r(unsigned int len, size_t recsize) { - if (kfifo_len(fifo) < len + recsize) - return len; + unsigned int max = (1 << (recsize << 3)) - 1; - __kfifo_out_data(fifo, to, len, recsize); - __kfifo_add_out(fifo, len + recsize); - return 0; + if (len > max) + return max; + return len; } -EXPORT_SYMBOL(__kfifo_out_n); -/** - * kfifo_out - gets some data from the FIFO - * @fifo: the fifo to be used. - * @to: where the data must be copied. - * @len: the size of the destination buffer. - * - * This function copies at most @len bytes from the FIFO into the - * @to buffer and returns the number of copied bytes. - * - * Note that with only one concurrent reader and one concurrent - * writer, you don't need extra locking to use these functions. +#define __KFIFO_PEEK(data, out, mask) \ + ((data)[(out) & (mask)]) +/* + * __kfifo_peek_n internal helper function for determinate the length of + * the next record in the fifo */ -unsigned int kfifo_out(struct kfifo *fifo, void *to, unsigned int len) +static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize) { - len = min(kfifo_len(fifo), len); + unsigned int l; + unsigned int mask = fifo->mask; + unsigned char *data = fifo->data; - __kfifo_out_data(fifo, to, len, 0); - __kfifo_add_out(fifo, len); + l = __KFIFO_PEEK(data, fifo->out, mask); - return len; + if (--recsize) + l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8; + + return l; } -EXPORT_SYMBOL(kfifo_out); - -/** - * kfifo_out_peek - copy some data from the FIFO, but do not remove it - * @fifo: the fifo to be used. - * @to: where the data must be copied. - * @len: the size of the destination buffer. - * @offset: offset into the fifo - * - * This function copies at most @len bytes at @offset from the FIFO - * into the @to buffer and returns the number of copied bytes. - * The data is not removed from the FIFO. 
+ +#define __KFIFO_POKE(data, in, mask, val) \ + ( \ + (data)[(in) & (mask)] = (unsigned char)(val) \ + ) + +/* + * __kfifo_poke_n internal helper function for storeing the length of + * the record into the fifo */ -unsigned int kfifo_out_peek(struct kfifo *fifo, void *to, unsigned int len, - unsigned offset) +static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize) { - len = min(kfifo_len(fifo), len + offset); + unsigned int mask = fifo->mask; + unsigned char *data = fifo->data; - __kfifo_out_data(fifo, to, len, offset); - return len; + __KFIFO_POKE(data, fifo->in, mask, n); + + if (recsize > 1) + __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8); } -EXPORT_SYMBOL(kfifo_out_peek); -unsigned int __kfifo_out_generic(struct kfifo *fifo, - void *to, unsigned int len, unsigned int recsize, - unsigned int *total) +unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize) { - return __kfifo_out_rec(fifo, to, len, recsize, total); + return __kfifo_peek_n(fifo, recsize); } -EXPORT_SYMBOL(__kfifo_out_generic); +EXPORT_SYMBOL(__kfifo_len_r); -unsigned int __kfifo_from_user_n(struct kfifo *fifo, - const void __user *from, unsigned int len, unsigned int recsize) +unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf, + unsigned int len, size_t recsize) { - unsigned total; + if (len + recsize > kfifo_unused(fifo)) + return 0; - if (kfifo_avail(fifo) < len + recsize) - return len + 1; + __kfifo_poke_n(fifo, len, recsize); - __kfifo_from_user_data(fifo, from, len, recsize, &total); - return total; + kfifo_copy_in(fifo, buf, len, fifo->in + recsize); + fifo->in += len + recsize; + return len; } -EXPORT_SYMBOL(__kfifo_from_user_n); - -/** - * kfifo_from_user - puts some data from user space into the FIFO - * @fifo: the fifo to be used. - * @from: pointer to the data to be added. - * @len: the length of the data to be added. - * @total: the actual returned data length. - * - * This function copies at most @len bytes from the @from into the - * FIFO depending and returns -EFAULT/0. - * - * Note that with only one concurrent reader and one concurrent - * writer, you don't need extra locking to use these functions. 
- */ -int kfifo_from_user(struct kfifo *fifo, - const void __user *from, unsigned int len, unsigned *total) +EXPORT_SYMBOL(__kfifo_in_r); + +static unsigned int kfifo_out_copy_r(struct __kfifo *fifo, + void *buf, unsigned int len, size_t recsize, unsigned int *n) { - int ret; - len = min(kfifo_avail(fifo), len); - ret = __kfifo_from_user_data(fifo, from, len, 0, total); - if (ret) - return ret; - __kfifo_add_in(fifo, len); - return 0; + *n = __kfifo_peek_n(fifo, recsize); + + if (len > *n) + len = *n; + + kfifo_copy_out(fifo, buf, len, fifo->out + recsize); + return len; +} + +unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf, + unsigned int len, size_t recsize) +{ + unsigned int n; + + if (fifo->in == fifo->out) + return 0; + + return kfifo_out_copy_r(fifo, buf, len, recsize, &n); } -EXPORT_SYMBOL(kfifo_from_user); +EXPORT_SYMBOL(__kfifo_out_peek_r); -unsigned int __kfifo_from_user_generic(struct kfifo *fifo, - const void __user *from, unsigned int len, unsigned int recsize) +unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf, + unsigned int len, size_t recsize) { - return __kfifo_from_user_rec(fifo, from, len, recsize); + unsigned int n; + + if (fifo->in == fifo->out) + return 0; + + len = kfifo_out_copy_r(fifo, buf, len, recsize, &n); + fifo->out += n + recsize; + return len; } -EXPORT_SYMBOL(__kfifo_from_user_generic); +EXPORT_SYMBOL(__kfifo_out_r); -unsigned int __kfifo_to_user_n(struct kfifo *fifo, - void __user *to, unsigned int len, unsigned int reclen, - unsigned int recsize) +int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from, + unsigned long len, unsigned int *copied, size_t recsize) { - unsigned int ret, total; + unsigned long ret; - if (kfifo_len(fifo) < reclen + recsize) - return len; + len = __kfifo_max_r(len, recsize); - ret = __kfifo_to_user_data(fifo, to, reclen, recsize, &total); + if (len + recsize > kfifo_unused(fifo)) { + *copied = 0; + return 0; + } - if (likely(ret == 0)) - __kfifo_add_out(fifo, reclen + recsize); + __kfifo_poke_n(fifo, len, recsize); - return total; + ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied); + if (unlikely(ret)) { + *copied = 0; + return -EFAULT; + } + fifo->in += len + recsize; + return 0; } -EXPORT_SYMBOL(__kfifo_to_user_n); - -/** - * kfifo_to_user - gets data from the FIFO and write it to user space - * @fifo: the fifo to be used. - * @to: where the data must be copied. - * @len: the size of the destination buffer. - * @lenout: pointer to output variable with copied data - * - * This function copies at most @len bytes from the FIFO into the - * @to buffer and 0 or -EFAULT. - * - * Note that with only one concurrent reader and one concurrent - * writer, you don't need extra locking to use these functions. 
- */ -int kfifo_to_user(struct kfifo *fifo, - void __user *to, unsigned int len, unsigned *lenout) +EXPORT_SYMBOL(__kfifo_from_user_r); + +int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, + unsigned long len, unsigned int *copied, size_t recsize) { - int ret; - len = min(kfifo_len(fifo), len); - ret = __kfifo_to_user_data(fifo, to, len, 0, lenout); - __kfifo_add_out(fifo, *lenout); - return ret; + unsigned long ret; + unsigned int n; + + if (fifo->in == fifo->out) { + *copied = 0; + return 0; + } + + n = __kfifo_peek_n(fifo, recsize); + if (len > n) + len = n; + + ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied); + if (unlikely(ret)) { + *copied = 0; + return -EFAULT; + } + fifo->out += n + recsize; + return 0; } -EXPORT_SYMBOL(kfifo_to_user); +EXPORT_SYMBOL(__kfifo_to_user_r); -unsigned int __kfifo_to_user_generic(struct kfifo *fifo, - void __user *to, unsigned int len, unsigned int recsize, - unsigned int *total) +unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) { - return __kfifo_to_user_rec(fifo, to, len, recsize, total); + if (!nents) + BUG(); + + len = __kfifo_max_r(len, recsize); + + if (len + recsize > kfifo_unused(fifo)) + return 0; + + return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize); } -EXPORT_SYMBOL(__kfifo_to_user_generic); +EXPORT_SYMBOL(__kfifo_dma_in_prepare_r); -unsigned int __kfifo_peek_generic(struct kfifo *fifo, unsigned int recsize) +void __kfifo_dma_in_finish_r(struct __kfifo *fifo, + unsigned int len, size_t recsize) { - if (recsize == 0) - return kfifo_avail(fifo); - - return __kfifo_peek_n(fifo, recsize); + len = __kfifo_max_r(len, recsize); + __kfifo_poke_n(fifo, len, recsize); + fifo->in += len + recsize; } -EXPORT_SYMBOL(__kfifo_peek_generic); +EXPORT_SYMBOL(__kfifo_dma_in_finish_r); -void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize) +unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) { - __kfifo_skip_rec(fifo, recsize); + if (!nents) + BUG(); + + len = __kfifo_max_r(len, recsize); + + if (len + recsize > fifo->in - fifo->out) + return 0; + + return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize); } -EXPORT_SYMBOL(__kfifo_skip_generic); +EXPORT_SYMBOL(__kfifo_dma_out_prepare_r); + +void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize) +{ + unsigned int len; + len = __kfifo_peek_n(fifo, recsize); + fifo->out += len + recsize; +} +EXPORT_SYMBOL(__kfifo_dma_out_finish_r); -- cgit v1.1 From d78a3eda6985e74bc21a23362f27526f73e71649 Mon Sep 17 00:00:00 2001 From: Stefani Seibold Date: Wed, 11 Aug 2010 14:17:27 -0700 Subject: kernel/kfifo.c: add handling of chained scatterlists The current kfifo scatterlist implementation will not work with chained scatterlists. It assumes that struct scatterlist arrays are allocated contiguously, which is not the case when chained scatterlists (struct sg_table) are in use. Signed-off-by: Stefani Seibold Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kfifo.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 02192dd..4502604 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c @@ -10,7 +10,7 @@ * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License @@ -333,17 +333,16 @@ static int setup_sgl_buf(struct scatterlist *sgl, void *buf, buf += PAGE_SIZE; npage = virt_to_page(buf); if (page_to_phys(page) != page_to_phys(npage) - l) { - sgl->page_link = 0; - sg_set_page(sgl++, page, l - off, off); - if (++n == nents) + sg_set_page(sgl, page, l - off, off); + sgl = sg_next(sgl); + if (++n == nents || sgl == NULL) return n; page = npage; len -= l - off; l = off = 0; } } - sgl->page_link = 0; - sg_set_page(sgl++, page, len, off); + sg_set_page(sgl, page, len, off); return n + 1; } @@ -363,7 +362,7 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, } l = min(len, size - off); - n = setup_sgl_buf(sgl, fifo->data + off, nents, l); + n = setup_sgl_buf(sgl, fifo->data + off, nents, l); n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); if (n) -- cgit v1.1 From 8d57a98ccd0b4489003473979da8f5a1363ba7a3 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 11 Aug 2010 14:17:49 -0700 Subject: block: add secure discard Secure discard is the same as discard except that all copies of the discarded sectors (perhaps created by garbage collection) must also be erased. Signed-off-by: Adrian Hunter Acked-by: Jens Axboe Cc: Kyungmin Park Cc: Madhusudhan Chikkature Cc: Christoph Hellwig Cc: Ben Gardiner Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/trace/blktrace.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 82499a5..959f8d6 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -710,6 +710,9 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, if (rq->cmd_flags & REQ_DISCARD) rw |= REQ_DISCARD; + if (rq->cmd_flags & REQ_SECURE) + rw |= REQ_SECURE; + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { what |= BLK_TC_ACT(BLK_TC_PC); __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw, @@ -1816,6 +1819,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) rwbs[i++] = 'S'; if (rw & REQ_META) rwbs[i++] = 'M'; + if (rw & REQ_SECURE) + rwbs[i++] = 'E'; rwbs[i] = '\0'; } @@ -1828,6 +1833,9 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq) if (rq->cmd_flags & REQ_DISCARD) rw |= REQ_DISCARD; + if (rq->cmd_flags & REQ_SECURE) + rw |= REQ_SECURE; + bytes = blk_rq_bytes(rq); blk_fill_rwbs(rwbs, rw, bytes); -- cgit v1.1 From 12fdff3fc2483f906ae6404a6e8dcf2550310b6f Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 12 Aug 2010 16:54:57 +0100 Subject: Add a dummy printk function for the maintenance of unused printks Add a dummy printk function for the maintenance of unused printks through gcc format checking, and also so that side-effect checking is maintained too. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- kernel/cred.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'kernel') diff --git a/kernel/cred.c b/kernel/cred.c index 60bc8b1..9a3e226 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -22,10 +22,6 @@ #define kdebug(FMT, ...) \ printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__) #else -static inline __attribute__((format(printf, 1, 2))) -void no_printk(const char *fmt, ...) -{ -} #define kdebug(FMT, ...) 
\ no_printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__) #endif -- cgit v1.1 From deda2e81961e96be4f2c09328baca4710a2fd1a0 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Mon, 9 Aug 2010 14:20:09 -0700 Subject: timekeeping: Fix overflow in rawtime tv_nsec on 32 bit archs The tv_nsec is a long and when added to the shifted interval it can wrap and become negative which later causes looping problems in the getrawmonotonic(). The edge case occurs when the system has slept for a short period of time of ~2 seconds. A trace printk of the values in this patch illustrate the problem: ftrace time stamp: log 43.716079: logarithmic_accumulation: raw: 3d0913 tv_nsec d687faa 43.718513: logarithmic_accumulation: raw: 3d0913 tv_nsec da588bd 43.722161: logarithmic_accumulation: raw: 3d0913 tv_nsec de291d0 46.349925: logarithmic_accumulation: raw: 7a122600 tv_nsec e1f9ae3 46.349930: logarithmic_accumulation: raw: 1e848980 tv_nsec 8831c0e3 The kernel starts looping at 46.349925 in the getrawmonotonic() due to the negative value from adding the raw value to tv_nsec. A simple solution is to accumulate into a u64, and then normalize it to a timespec_t. Signed-off-by: Jason Wessel [ Reworked variable names and simplified some of the code. - John ] Signed-off-by: John Stultz Cc: Thomas Gleixner Cc: H. Peter Anvin Signed-off-by: Linus Torvalds --- kernel/time/timekeeping.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index e14c839..e960d82 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -690,6 +690,7 @@ static void timekeeping_adjust(s64 offset) static cycle_t logarithmic_accumulation(cycle_t offset, int shift) { u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; + u64 raw_nsecs; /* If the offset is smaller then a shifted interval, do nothing */ if (offset < timekeeper.cycle_interval<= NSEC_PER_SEC) { - raw_time.tv_nsec -= NSEC_PER_SEC; + /* Accumulate raw time */ + raw_nsecs = timekeeper.raw_interval << shift; + raw_nsecs += raw_time.tv_nsec; + while (raw_nsecs >= NSEC_PER_SEC) { + raw_nsecs -= NSEC_PER_SEC; raw_time.tv_sec++; } + raw_time.tv_nsec = raw_nsecs; /* Accumulate error between NTP and clock interval */ timekeeper.ntp_error += tick_length << shift; -- cgit v1.1 From 2a37a3df57c44e947271758a1aa4bea7bff9feab Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 3 Jun 2010 15:21:34 -0400 Subject: tracing/events: Convert format output to seq_file Two new events were added that broke the current format output. Both from the SCSI system: scsi_dispatch_cmd_done and scsi_dispatch_cmd_timeout The reason is that their print_fmt exceeded a page size. Since the output of the format used simple_read_from_buffer and trace_seq, it was limited to a page size in output. This patch converts the printing of the format of an event into seq_file, which allows greater than a page size to be shown. I diffed all event formats comparing the output with and without this patch. All matched except for the above two, which showed just: FORMAT TOO BIG without this patch, but now properly displays the output with this patch. v2: Remove updating *pos in seq start function. [ Thanks to Li Zefan for pointing that out ] Reviewed-by: Li Zefan Cc: Martin K. 
Petersen Cc: Kei Tokunaga Cc: James Bottomley Cc: Tomohiro Kusumi Cc: Xiao Guangrong Signed-off-by: Steven Rostedt --- kernel/trace/trace_events.c | 208 ++++++++++++++++++++++++++++++-------------- 1 file changed, 141 insertions(+), 67 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 53cffc0..45a8968 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -29,6 +29,8 @@ DEFINE_MUTEX(event_mutex); LIST_HEAD(ftrace_events); +#define COMMON_FIELD_COUNT 5 + struct list_head * trace_get_fields(struct ftrace_event_call *event_call) { @@ -544,85 +546,155 @@ out: return ret; } -static ssize_t -event_format_read(struct file *filp, char __user *ubuf, size_t cnt, - loff_t *ppos) +enum { + FORMAT_HEADER = 1, + FORMAT_PRINTFMT = 2, +}; + +static void *f_next(struct seq_file *m, void *v, loff_t *pos) { - struct ftrace_event_call *call = filp->private_data; + struct ftrace_event_call *call = m->private; struct ftrace_event_field *field; struct list_head *head; - struct trace_seq *s; - int common_field_count = 5; - char *buf; - int r = 0; - - if (*ppos) - return 0; + loff_t index = *pos; - s = kmalloc(sizeof(*s), GFP_KERNEL); - if (!s) - return -ENOMEM; + (*pos)++; - trace_seq_init(s); + head = trace_get_fields(call); - trace_seq_printf(s, "name: %s\n", call->name); - trace_seq_printf(s, "ID: %d\n", call->event.type); - trace_seq_printf(s, "format:\n"); + switch ((unsigned long)v) { + case FORMAT_HEADER: - head = trace_get_fields(call); - list_for_each_entry_reverse(field, head, link) { - /* - * Smartly shows the array type(except dynamic array). - * Normal: - * field:TYPE VAR - * If TYPE := TYPE[LEN], it is shown: - * field:TYPE VAR[LEN] - */ - const char *array_descriptor = strchr(field->type, '['); - - if (!strncmp(field->type, "__data_loc", 10)) - array_descriptor = NULL; - - if (!array_descriptor) { - r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;" - "\tsize:%u;\tsigned:%d;\n", - field->type, field->name, field->offset, - field->size, !!field->is_signed); - } else { - r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;" - "\tsize:%u;\tsigned:%d;\n", - (int)(array_descriptor - field->type), - field->type, field->name, - array_descriptor, field->offset, - field->size, !!field->is_signed); - } + if (unlikely(list_empty(head))) + return NULL; - if (--common_field_count == 0) - r = trace_seq_printf(s, "\n"); + field = list_entry(head->prev, struct ftrace_event_field, link); + return field; - if (!r) - break; + case FORMAT_PRINTFMT: + /* all done */ + return NULL; } - if (r) - r = trace_seq_printf(s, "\nprint fmt: %s\n", - call->print_fmt); + /* + * To separate common fields from event fields, the + * LSB is set on the first event field. Clear it in case. + */ + v = (void *)((unsigned long)v & ~1L); - if (!r) { - /* - * ug! The format output is bigger than a PAGE!! 
- */ - buf = "FORMAT TOO BIG\n"; - r = simple_read_from_buffer(ubuf, cnt, ppos, - buf, strlen(buf)); - goto out; + field = v; + if (field->link.prev == head) + return (void *)FORMAT_PRINTFMT; + + field = list_entry(field->link.prev, struct ftrace_event_field, link); + + /* Set the LSB to notify f_show to print an extra newline */ + if (index == COMMON_FIELD_COUNT) + field = (struct ftrace_event_field *) + ((unsigned long)field | 1); + + return field; +} + +static void *f_start(struct seq_file *m, loff_t *pos) +{ + loff_t l = 0; + void *p; + + /* Start by showing the header */ + if (!*pos) + return (void *)FORMAT_HEADER; + + p = (void *)FORMAT_HEADER; + do { + p = f_next(m, p, &l); + } while (p && l < *pos); + + return p; +} + +static int f_show(struct seq_file *m, void *v) +{ + struct ftrace_event_call *call = m->private; + struct ftrace_event_field *field; + const char *array_descriptor; + + switch ((unsigned long)v) { + case FORMAT_HEADER: + seq_printf(m, "name: %s\n", call->name); + seq_printf(m, "ID: %d\n", call->event.type); + seq_printf(m, "format:\n"); + return 0; + + case FORMAT_PRINTFMT: + seq_printf(m, "\nprint fmt: %s\n", + call->print_fmt); + return 0; } - r = simple_read_from_buffer(ubuf, cnt, ppos, - s->buffer, s->len); - out: - kfree(s); - return r; + /* + * To separate common fields from event fields, the + * LSB is set on the first event field. Clear it and + * print a newline if it is set. + */ + if ((unsigned long)v & 1) { + seq_putc(m, '\n'); + v = (void *)((unsigned long)v & ~1L); + } + + field = v; + + /* + * Smartly shows the array type(except dynamic array). + * Normal: + * field:TYPE VAR + * If TYPE := TYPE[LEN], it is shown: + * field:TYPE VAR[LEN] + */ + array_descriptor = strchr(field->type, '['); + + if (!strncmp(field->type, "__data_loc", 10)) + array_descriptor = NULL; + + if (!array_descriptor) + seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", + field->type, field->name, field->offset, + field->size, !!field->is_signed); + else + seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", + (int)(array_descriptor - field->type), + field->type, field->name, + array_descriptor, field->offset, + field->size, !!field->is_signed); + + return 0; +} + +static void f_stop(struct seq_file *m, void *p) +{ +} + +static const struct seq_operations trace_format_seq_ops = { + .start = f_start, + .next = f_next, + .stop = f_stop, + .show = f_show, +}; + +static int trace_format_open(struct inode *inode, struct file *file) +{ + struct ftrace_event_call *call = inode->i_private; + struct seq_file *m; + int ret; + + ret = seq_open(file, &trace_format_seq_ops); + if (ret < 0) + return ret; + + m = file->private_data; + m->private = call; + + return 0; } static ssize_t @@ -820,8 +892,10 @@ static const struct file_operations ftrace_enable_fops = { }; static const struct file_operations ftrace_event_format_fops = { - .open = tracing_open_generic, - .read = event_format_read, + .open = trace_format_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, }; static const struct file_operations ftrace_event_id_fops = { -- cgit v1.1 From 2069601b3f0ea38170d4b509b89f3ca0a373bdc1 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 12 Aug 2010 14:23:04 -0700 Subject: Revert "fsnotify: store struct file not struct path" This reverts commit 3bcf3860a4ff9bbc522820b4b765e65e4deceb3e (and the accompanying commit c1e5c954020e "vfs/fsnotify: fsnotify_close can delay the final work in fput" that was a horribly ugly hack to make it work at 
all). The 'struct file' approach not only causes that disgusting hack, it somehow breaks pulseaudio, probably due to some other subtlety with f_count handling. Fix up various conflicts due to later fsnotify work. Signed-off-by: Linus Torvalds --- kernel/audit_watch.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 6bf2306..f0c9b2e 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -526,8 +526,8 @@ static int audit_watch_handle_event(struct fsnotify_group *group, BUG_ON(group != audit_watch_group); switch (event->data_type) { - case (FSNOTIFY_EVENT_FILE): - inode = event->file->f_path.dentry->d_inode; + case (FSNOTIFY_EVENT_PATH): + inode = event->path.dentry->d_inode; break; case (FSNOTIFY_EVENT_INODE): inode = event->inode; -- cgit v1.1 From c7dcf87a6881bf796faee83003163eb3de41a309 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Fri, 13 Aug 2010 11:30:58 -0700 Subject: time: Workaround gcc loop optimization that causes 64bit div errors Early 4.3 versions of gcc apparently aggressively optimize the raw time accumulation loop, replacing it with a divide. On 32bit systems, this causes the following link errors: undefined reference to `__umoddi3' undefined reference to `__udivdi3' The gcc issue has been fixed in 4.4 and greater. This patch replaces the accumulation loop with a do_div, as suggested by Linus. Signed-off-by: John Stultz CC: Jason Wessel CC: Larry Finger CC: Ingo Molnar CC: Linus Torvalds Signed-off-by: Linus Torvalds --- kernel/time/timekeeping.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index e960d82..49010d8 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -710,9 +710,10 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) /* Accumulate raw time */ raw_nsecs = timekeeper.raw_interval << shift; raw_nsecs += raw_time.tv_nsec; - while (raw_nsecs >= NSEC_PER_SEC) { - raw_nsecs -= NSEC_PER_SEC; - raw_time.tv_sec++; + if (raw_nsecs >= NSEC_PER_SEC) { + u64 raw_secs = raw_nsecs; + raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); + raw_time.tv_sec += raw_secs; } raw_time.tv_nsec = raw_nsecs; -- cgit v1.1 From 1aa54bca6ee0d07ebcafb8ca8074b624d80724aa Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Wed, 28 Jul 2010 01:18:01 +0200 Subject: tracing: Sanitize value returned from write(trace_marker, "...", len) When userspace code writes non-new-line-terminated string to trace_marker file, write handler appends new-line and returns number of bytes written to trace buffer, so write(fd, "abc", 3) will return 4 That's unexpected and unfortunately it confuses glibc's fprintf function. Example: int main() { fprintf(stderr, "abc"); return 0; } $ gcc test.c -o test $ echo mmiotrace > /sys/kernel/debug/tracing/current_tracer $ ./test 2>/sys/kernel/debug/tracing/trace_marker results in infinite loop: write(fd, "abc", 3) = 4 write(fd, "", 1) = 0 write(fd, "", 1) = 0 write(fd, "", 1) = 0 write(fd, "", 1) = 0 write(fd, "", 1) = 0 write(fd, "", 1) = 0 write(fd, "", 1) = 0 (...) ...and kernel trace buffer full of empty markers. Fix it by sanitizing write return value. 
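For illustration, a minimal sketch of the rule this fix enforces in a ->write() handler; it is not the actual patch, and do_store() is a hypothetical stand-in for the handler's real copy routine (mark_printk() here). The handler may append data internally (the trailing newline), but it must never report more bytes written than userspace asked for, or stdio's retry loop never terminates.

	/* Illustrative sketch only; do_store() is hypothetical. */
	static ssize_t example_write(struct file *filp, const char __user *ubuf,
				     size_t cnt, loff_t *fpos)
	{
		size_t written = do_store(ubuf, cnt);	/* may append a '\n' internally */

		*fpos += written;

		/* never claim more than the caller requested */
		if (written > cnt)
			written = cnt;

		return written;
	}
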
Signed-off-by: Marcin Slusarz LKML-Reference: <20100727231801.GB2826@joi.lan> Cc: Frederic Weisbecker Cc: Ingo Molnar Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 086d363..88b42d1 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3498,6 +3498,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { char *buf; + size_t written; if (tracing_disabled) return -EINVAL; @@ -3519,11 +3520,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, } else buf[cnt] = '\0'; - cnt = mark_printk("%s", buf); + written = mark_printk("%s", buf); kfree(buf); - *fpos += cnt; + *fpos += written; - return cnt; + /* don't tell userspace we wrote more - it might confuse them */ + if (written > cnt) + written = cnt; + + return written; } static int tracing_clock_show(struct seq_file *m, void *v) -- cgit v1.1 From b590cddfa6f40447158323b43a13cdae01d9a051 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Mon, 16 Aug 2010 15:58:29 -0500 Subject: kdb: fix compile error without CONFIG_KALLSYMS If CONFIG_KGDB_KDB is set and CONFIG_KALLSYMS is not set the kernel will fail to build with the error: kernel/built-in.o: In function `kallsyms_symbol_next': kernel/debug/kdb/kdb_support.c:237: undefined reference to `kdb_walk_kallsyms' kernel/built-in.o: In function `kallsyms_symbol_complete': kernel/debug/kdb/kdb_support.c:193: undefined reference to `kdb_walk_kallsyms' The kdb_walk_kallsyms needs a #ifdef proper header to match the C implementation. This patch also fixes the compiler warnings in kdb_support.c when compiling without CONFIG_KALLSYMS set. The compiler warnings are a result of the kallsyms_lookup() macro not initializing the two of the pass by reference variables. Signed-off-by: Jason Wessel Reported-by: Michal Simek --- kernel/debug/kdb/kdb_private.h | 7 +++++++ kernel/debug/kdb/kdb_support.c | 4 ++-- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index c438f54..be775f7 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -255,7 +255,14 @@ extern void kdb_ps1(const struct task_struct *p); extern void kdb_print_nameval(const char *name, unsigned long val); extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info); extern void kdb_meminfo_proc_show(void); +#ifdef CONFIG_KALLSYMS extern const char *kdb_walk_kallsyms(loff_t *pos); +#else /* ! CONFIG_KALLSYMS */ +static inline const char *kdb_walk_kallsyms(loff_t *pos) +{ + return NULL; +} +#endif /* ! 
CONFIG_KALLSYMS */ extern char *kdb_getstr(char *, size_t, char *); /* Defines for kdb_symbol_print */ diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 45344d5c..6b2485d 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -82,8 +82,8 @@ static char *kdb_name_table[100]; /* arbitrary size */ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab) { int ret = 0; - unsigned long symbolsize; - unsigned long offset; + unsigned long symbolsize = 0; + unsigned long offset = 0; #define knt1_size 128 /* must be >= kallsyms table size */ char *knt1 = NULL; -- cgit v1.1 From d7627467b7a8dd6944885290a03a07ceb28c10eb Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 17 Aug 2010 23:52:56 +0100 Subject: Make do_execve() take a const filename pointer Make do_execve() take a const filename pointer so that kernel_execve() compiles correctly on ARM: arch/arm/kernel/sys_arm.c:88: warning: passing argument 1 of 'do_execve' discards qualifiers from pointer target type This also requires the argv and envp arguments to be consted twice, once for the pointer array and once for the strings the array points to. This is because do_execve() passes a pointer to the filename (now const) to copy_strings_kernel(). A simpler alternative would be to cast the filename pointer in do_execve() when it's passed to copy_strings_kernel(). do_execve() may not change any of the strings it is passed as part of the argv or envp lists as they are some of them in .rodata, so marking these strings as const should be fine. Further kernel_execve() and sys_execve() need to be changed to match. This has been test built on x86_64, frv, arm and mips. Signed-off-by: David Howells Tested-by: Ralf Baechle Acked-by: Russell King Signed-off-by: Linus Torvalds --- kernel/kmod.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kmod.c b/kernel/kmod.c index 6e9b196..9cd0591 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -153,7 +153,9 @@ static int ____call_usermodehelper(void *data) goto fail; } - retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp); + retval = kernel_execve(sub_info->path, + (const char *const *)sub_info->argv, + (const char *const *)sub_info->envp); /* Exec failed? */ fail: -- cgit v1.1 From f362b73244fb16ea4ae127ced1467dd8adaa7733 Mon Sep 17 00:00:00 2001 From: Daniel J Blueman Date: Tue, 17 Aug 2010 23:56:55 +0100 Subject: Fix unprotected access to task credentials in waitid() Using a program like the following: #include #include #include #include int main() { id_t id; siginfo_t infop; pid_t res; id = fork(); if (id == 0) { sleep(1); exit(0); } kill(id, SIGSTOP); alarm(1); waitid(P_PID, id, &infop, WCONTINUED); return 0; } to call waitid() on a stopped process results in access to the child task's credentials without the RCU read lock being held - which may be replaced in the meantime - eliciting the following warning: =================================================== [ INFO: suspicious rcu_dereference_check() usage. ] --------------------------------------------------- kernel/exit.c:1460 invoked rcu_dereference_check() without protection! 
other info that might help us debug this: rcu_scheduler_active = 1, debug_locks = 1 2 locks held by waitid02/22252: #0: (tasklist_lock){.?.?..}, at: [] do_wait+0xc5/0x310 #1: (&(&sighand->siglock)->rlock){-.-...}, at: [] wait_consider_task+0x19a/0xbe0 stack backtrace: Pid: 22252, comm: waitid02 Not tainted 2.6.35-323cd+ #3 Call Trace: [] lockdep_rcu_dereference+0xa4/0xc0 [] wait_consider_task+0xaf1/0xbe0 [] do_wait+0xf5/0x310 [] sys_waitid+0x86/0x1f0 [] ? child_wait_callback+0x0/0x70 [] system_call_fastpath+0x16/0x1b This is fixed by holding the RCU read lock in wait_task_continued() to ensure that the task's current credentials aren't destroyed between us reading the cred pointer and us reading the UID from those credentials. Furthermore, protect wait_task_stopped() in the same way. We don't need to keep holding the RCU read lock once we've read the UID from the credentials as holding the RCU read lock doesn't stop the target task from changing its creds under us - so the credentials may be outdated immediately after we've read the pointer, lock or no lock. Signed-off-by: Daniel J Blueman Signed-off-by: David Howells Acked-by: Paul E. McKenney Acked-by: Oleg Nesterov Signed-off-by: Linus Torvalds --- kernel/exit.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 671ed56..0312022 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1386,8 +1386,7 @@ static int wait_task_stopped(struct wait_opts *wo, if (!unlikely(wo->wo_flags & WNOWAIT)) *p_code = 0; - /* don't need the RCU readlock here as we're holding a spinlock */ - uid = __task_cred(p)->uid; + uid = task_uid(p); unlock_sig: spin_unlock_irq(&p->sighand->siglock); if (!exit_code) @@ -1460,7 +1459,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) } if (!unlikely(wo->wo_flags & WNOWAIT)) p->signal->flags &= ~SIGNAL_STOP_CONTINUED; - uid = __task_cred(p)->uid; + uid = task_uid(p); spin_unlock_irq(&p->sighand->siglock); pid = task_pid_vnr(p); -- cgit v1.1 From 2a4419b5b2a77f3f4537c14f7ad7df95770655dd Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Wed, 18 Aug 2010 04:37:33 +1000 Subject: fs: fs_struct rwlock to spinlock fs: fs_struct rwlock to spinlock struct fs_struct.lock is an rwlock with the read-side used to protect root and pwd members while taking references to them. Taking a reference to a path typically requires just 2 atomic ops, so the critical section is very small. Parallel read-side operations would have cacheline contention on the lock, the dentry, and the vfsmount cachelines, so the rwlock is unlikely to ever give a real parallelism increase. Replace it with a spinlock to avoid one or two atomic operations in typical path lookup fastpath. 
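A simplified sketch of the read-side pattern this conversion targets (modeled on fs/fs_struct.c after the change; the pwd variant and callers are omitted). The critical section only copies a struct path and takes references on it, so an uncontended spinlock acquire/release is cheaper than an rwlock read lock that buys no real parallelism here.

	/* Simplified sketch of a read-side user after the conversion. */
	void get_fs_root(struct fs_struct *fs, struct path *root)
	{
		spin_lock(&fs->lock);
		*root = fs->root;
		path_get(root);		/* grab dentry + vfsmount references */
		spin_unlock(&fs->lock);
	}
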
Signed-off-by: Nick Piggin
Signed-off-by: Al Viro
---
 kernel/fork.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'kernel')

diff --git a/kernel/fork.c b/kernel/fork.c
index 98b4508..856eac3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -752,13 +752,13 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 	struct fs_struct *fs = current->fs;
 	if (clone_flags & CLONE_FS) {
 		/* tsk->fs is already what we want */
-		write_lock(&fs->lock);
+		spin_lock(&fs->lock);
 		if (fs->in_exec) {
-			write_unlock(&fs->lock);
+			spin_unlock(&fs->lock);
 			return -EAGAIN;
 		}
 		fs->users++;
-		write_unlock(&fs->lock);
+		spin_unlock(&fs->lock);
 		return 0;
 	}
 	tsk->fs = copy_fs_struct(fs);
@@ -1676,13 +1676,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)

 	if (new_fs) {
 		fs = current->fs;
-		write_lock(&fs->lock);
+		spin_lock(&fs->lock);
 		current->fs = new_fs;
 		if (--fs->users)
 			new_fs = NULL;
 		else
 			new_fs = fs;
-		write_unlock(&fs->lock);
+		spin_unlock(&fs->lock);
 	}

 	if (new_mm) {
-- cgit v1.1

From 1495cc9df4e81f5a8fa9b0b8f1034b14d24b7d8c Mon Sep 17 00:00:00 2001
From: Dmitry Torokhov
Date: Tue, 17 Aug 2010 21:15:46 -0700
Subject: Input: sysrq - drop tty argument from sysrq ops handlers

No one is using the tty argument, so let's get rid of it.

Acked-by: Alan Cox
Acked-by: Jason Wessel
Acked-by: Greg Kroah-Hartman
Signed-off-by: Dmitry Torokhov
---
 kernel/debug/debug_core.c | 2 +-
 kernel/power/poweroff.c   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 3c2d497..de407c7 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -741,7 +741,7 @@ static struct console kgdbcons = {
 };

 #ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+static void sysrq_handle_dbg(int key)
 {
 	if (!dbg_io_ops) {
 		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index e8b3370..d523593 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy)

 static DECLARE_WORK(poweroff_work, do_poweroff);

-static void handle_poweroff(int key, struct tty_struct *tty)
+static void handle_poweroff(int key)
 {
 	/* run sysrq poweroff on boot cpu */
 	schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
-- cgit v1.1

From 861d034ee814917a83bd5de4b26e3b8336ddeeb8 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 19 Aug 2010 13:31:43 +0200
Subject: sched: Fix rq->clock synchronization when migrating tasks

sched_fork() -- we do task placement in ->task_fork_fair(); ensure we call
update_rq_clock() there so we work with the current time.  We leave the
vruntime in relative state, so the time delay until wake_up_new_task()
doesn't matter.

wake_up_new_task() -- since task_fork_fair() left p->vruntime in relative
state, we can safely migrate; the activate_task() on the remote rq will
call update_rq_clock() and cause the clock to be synced (enough).
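As a rough, heavily trimmed sketch of the ordering described above (this is
an illustration, not the actual function; only the clock update and the
placement step are kept):

	/* Shape of task_fork_fair() after this patch, details omitted. */
	static void task_fork_fair_sketch(struct rq *rq, struct task_struct *p)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);

		update_rq_clock(rq);	/* placement math below sees the current time */

		/*
		 * The child's vruntime is left relative to the cfs_rq here;
		 * wake_up_new_task() -> activate_task() updates the clock again
		 * on whichever rq the task finally runs on.
		 */

		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}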
Tested-by: Jack Daniel
Tested-by: Philby John
Signed-off-by: Peter Zijlstra
LKML-Reference: <1281002322.1923.1708.camel@laptop>
Signed-off-by: Ingo Molnar
---
 kernel/sched_fair.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'kernel')

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 806d1b2..ab661eb 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3752,6 +3752,8 @@ static void task_fork_fair(struct task_struct *p)

 	raw_spin_lock_irqsave(&rq->lock, flags);

+	update_rq_clock(rq);
+
 	if (unlikely(task_cpu(p) != this_cpu))
 		__set_task_cpu(p, this_cpu);

-- cgit v1.1

From b35de43b31040828f83046f40fd34ba33146409d Mon Sep 17 00:00:00 2001
From: Andrea Righi
Date: Thu, 19 Aug 2010 14:13:27 -0700
Subject: kfifo: implement missing __kfifo_skip_r()

kfifo_skip() is currently broken because the internal helper function it
relies on is missing.  Add it.

Signed-off-by: Andrea Righi
Cc: Greg KH
Acked-by: Stefani Seibold
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/kfifo.c | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'kernel')

diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 4502604..6b5580c 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -503,6 +503,15 @@ unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
 }
 EXPORT_SYMBOL(__kfifo_out_r);

+void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+{
+	unsigned int n;
+
+	n = __kfifo_peek_n(fifo, recsize);
+	fifo->out += n + recsize;
+}
+EXPORT_SYMBOL(__kfifo_skip_r);
+
 int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
 	unsigned long len, unsigned int *copied, size_t recsize)
 {
-- cgit v1.1

From f335397d177c906256ee1bba28e8c49e8ec63817 Mon Sep 17 00:00:00 2001
From: Dmitry Torokhov
Date: Tue, 17 Aug 2010 21:15:47 -0700
Subject: Input: sysrq - drop tty argument from handle_sysrq()

Sysrq operations do not accept a tty argument anymore, so there is no need
to pass it to us.

[Stephen Rothwell : fix build breakage in drm code caused by sysrq using
 bool but not including linux/types.h]
[Sachin Sant : fix build breakage in s390 keyboard driver]

Acked-by: Alan Cox
Acked-by: Jason Wessel
Acked-by: Greg Kroah-Hartman
Signed-off-by: Dmitry Torokhov
---
 kernel/debug/kdb/kdb_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 28b8441..caf057a 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1929,7 +1929,7 @@ static int kdb_sr(int argc, const char **argv)
 	if (argc != 1)
 		return KDB_ARGCOUNT;
 	kdb_trap_printk++;
-	__handle_sysrq(*argv[1], NULL, 0);
+	__handle_sysrq(*argv[1], false);
 	kdb_trap_printk--;

 	return 0;
-- cgit v1.1

From 297c5eee372478fc32fec5fe8eed711eedb13f3d Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Fri, 20 Aug 2010 16:24:55 -0700
Subject: mm: make the vma list be doubly linked

It's a really simple list, and several of the users want to go backwards
in it to find the previous vma.  So rather than have to look up the
previous entry with 'find_vma_prev()' or something similar, just make it
doubly linked instead.
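For illustration, a small sketch of what the back-link buys callers: the
helper name below is invented, and it assumes the vm_prev member added in
the mm/ part of this commit (not shown by the 'kernel' filter here).

	#include <linux/mm_types.h>

	/*
	 * Sketch only: with vm_prev in place, finding the VMA before a known
	 * one is a single pointer dereference instead of a find_vma_prev()
	 * lookup that walks the tree/list again.
	 */
	static struct vm_area_struct *prev_vma_sketch(struct vm_area_struct *vma)
	{
		return vma ? vma->vm_prev : NULL;
	}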
Tested-by: Ian Campbell
Signed-off-by: Linus Torvalds
---
 kernel/fork.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/fork.c b/kernel/fork.c
index 856eac3..b7e9d60 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -300,7 +300,7 @@ out:
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-	struct vm_area_struct *mpnt, *tmp, **pprev;
+	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	if (retval)
 		goto out;

+	prev = NULL;
 	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
 		struct file *file;

@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			goto fail_nomem_anon_vma_fork;
 		tmp->vm_flags &= ~VM_LOCKED;
 		tmp->vm_mm = mm;
-		tmp->vm_next = NULL;
+		tmp->vm_next = tmp->vm_prev = NULL;
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		 */
 		*pprev = tmp;
 		pprev = &tmp->vm_next;
+		tmp->vm_prev = prev;
+		prev = tmp;

 		__vma_link_rb(mm, tmp, rb_link, rb_parent);
 		rb_link = &tmp->vm_rb.rb_right;
-- cgit v1.1

From e36c886a0f9d624377977fa6cae309cfd7f362fa Mon Sep 17 00:00:00 2001
From: Arjan van de Ven
Date: Sat, 21 Aug 2010 13:07:26 -0700
Subject: workqueue: Add basic tracepoints to track workqueue execution

With the introduction of the new unified work queue thread pools, we lost
one feature: it's no longer possible to know which worker is causing the
CPU to wake out of idle.  The result is that PowerTOP now reports a lot of
"kworker/a:b" instead of more readable results.

This patch adds a pair of tracepoints to the new workqueue code, similar
in style to the timer/hrtimer tracepoints.

With this pair of tracepoints, the next PowerTOP can correctly report
which work item caused the wakeup (and how long it took):

	Interrupt (43)  i915                    time   3.51ms  wakeups 141
	Work            ieee80211_iface_work    time   0.81ms  wakeups  29
	Work            do_dbs_timer            time   0.55ms  wakeups  24
	Process         Xorg                    time  21.36ms  wakeups   4
	Timer           sched_rt_period_timer   time   0.01ms  wakeups   1

Signed-off-by: Arjan van de Ven
Signed-off-by: Linus Torvalds
---
 kernel/workqueue.c | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'kernel')

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2994a0e..8bd600c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -35,6 +35,9 @@
 #include
 #include

+#define CREATE_TRACE_POINTS
+#include
+
 #include "workqueue_sched.h"

 enum {
@@ -1790,7 +1793,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 	work_clear_pending(work);
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
+	trace_workqueue_execute_start(work);
 	f(work);
+	/*
+	 * While we must be careful to not use "work" after this, the trace
+	 * point will only record its address.
+	 */
+	trace_workqueue_execute_end(work);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);

-- cgit v1.1

From c6db67cda735d8ace5f19c3831240e1408679790 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 20 Aug 2010 11:49:15 +0200
Subject: watchdog: Don't throttle the watchdog

Stephane reported that when the machine locks up, the regular ticks, which
are responsible for resetting the throttle count, stop too.
Hence the NMI watchdog can end up being throttled before it reports on the
locked-up state, and we end up being sad..

Cure this by having the watchdog overflow handler reset its own throttle
count.

Reported-by: Stephane Eranian
Tested-by: Stephane Eranian
Cc: Don Zickus
Cc: Frederic Weisbecker
Signed-off-by: Peter Zijlstra
LKML-Reference: <1282215916.1926.4696.camel@laptop>
Signed-off-by: Ingo Molnar
---
 kernel/watchdog.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'kernel')

diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 613bc1f..0d53c8e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -206,6 +206,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		struct perf_sample_data *data,
 		struct pt_regs *regs)
 {
+	/* Ensure the watchdog never gets throttled */
+	event->hw.interrupts = 0;
+
 	if (__get_cpu_var(watchdog_nmi_touch) == true) {
 		__get_cpu_var(watchdog_nmi_touch) = false;
 		return;
-- cgit v1.1

From 9d0f4dcc5c4d1c5dd01172172684a45b5f49d740 Mon Sep 17 00:00:00 2001
From: Tim Chen
Date: Wed, 18 Aug 2010 15:00:27 -0700
Subject: mutex: Improve the scalability of optimistic spinning
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There is a scalability issue in the current implementation of optimistic
mutex spinning in the kernel.  It was found on an 8 node, 64 core
Nehalem-EX system (HT mode).

The intention of the optimistic mutex spin is to busy wait and spin on a
mutex if the owner of the mutex is running, in the hope that the mutex
will be released soon and can be acquired without the spinning thread
going to sleep.  However, when we have a large number of threads
contending for the mutex, the mutex can be grabbed by another thread, and
then another, and we will keep spinning, wasting cpu cycles and adding to
the contention.  One possible fix is to quit spinning and put the current
thread on the wait-list if the mutex lock switches to a new owner while we
spin, indicating heavy contention (see the patch included).

I did some testing on an 8 socket Nehalem-EX system with a total of 64
cores.  Using Ingo's test-mutex program that creates/deletes files with
256 threads (http://lkml.org/lkml/2006/1/8/50), I see the following speed
up after putting in the mutex spin fix:

	./mutex-test V 256 10
	                 Ops/sec
	2.6.34            62864
	With fix         197200

Repeating the test with the Aim7 fserver workload, again there is a speed
up with the fix:

	                Jobs/min
	2.6.34            91657
	With fix         149325

To look at the impact on the distribution of mutex acquisition time, I
collected the mutex acquisition time on the Aim7 fserver workload with
some instrumentation.  The average acquisition time is reduced by 48% and
the number of contentions is reduced by 32%.

	           #contentions    Time to acquire mutex (cycles)
	2.6.34         72973                  44765791
	With fix       49210                  23067129

The histogram of mutex acquisition time is listed below.  The acquisition
time is in 2^bin cycles.  We see that without the fix, the acquisition
time is mostly around 2^26 cycles.  With the fix, the distribution gets
spread out a lot more towards the lower cycles, starting from 2^13.
However, there is an increase of the tail distribution with the fix at
2^28 and 2^29 cycles.  It seems a small price to pay for the reduced
average acquisition time and also getting the cpu to do useful work.
Mutex acquisition time distribution (acq time = 2^bin cycles):

	            2.6.34               With Fix
	bin    #occurrence     %     #occurrence     %
	11             2     0.00%          120    0.24%
	12            10     0.01%          790    1.61%
	13            14     0.02%         2058    4.18%
	14            86     0.12%         3378    6.86%
	15           393     0.54%         4831    9.82%
	16           710     0.97%         4893    9.94%
	17           815     1.12%         4667    9.48%
	18           790     1.08%         5147   10.46%
	19           580     0.80%         6250   12.70%
	20           429     0.59%         6870   13.96%
	21           311     0.43%         1809    3.68%
	22           255     0.35%         2305    4.68%
	23           317     0.44%          916    1.86%
	24           610     0.84%          233    0.47%
	25          3128     4.29%           95    0.19%
	26         63902    87.69%          122    0.25%
	27           619     0.85%          286    0.58%
	28             0     0.00%         3536    7.19%
	29             0     0.00%          903    1.83%
	30             0     0.00%            0    0.00%

I've done similar experiments with the 2.6.35 kernel on smaller boxes as
well.  One is a dual-socket Westmere box (12 cores total, with HT).
Another experiment is on an old dual-socket Core 2 box (4 cores total, no
HT).  On the 12-core Westmere box, I see a 250% increase for Ingo's
mutex-test program with my mutex patch but no significant difference in
aim7's fserver workload.  On the 4-core Core 2 box, the differences with
the patch for both mutex-test and aim7 fserver are negligible.

So far, it seems like the patch has not caused regressions on smaller
systems.

Signed-off-by: Tim Chen
Acked-by: Peter Zijlstra
Cc: Linus Torvalds
Cc: Andrew Morton
Cc: Thomas Gleixner
Cc: Frederic Weisbecker
Cc:  # .35.x
LKML-Reference: <1282168827.9542.72.camel@schen9-DESK>
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/sched.c b/kernel/sched.c
index 41541d7..09b574e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3865,8 +3865,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 		/*
 		 * Owner changed, break to re-assess state.
 		 */
-		if (lock->owner != owner)
+		if (lock->owner != owner) {
+			/*
+			 * If the lock has switched to a different owner,
+			 * we likely have heavy contention. Return 0 to quit
+			 * optimistic spinning and not contend further:
+			 */
+			if (lock->owner)
+				return 0;
 			break;
+		}

 		/*
 		 * Is that owner really running on that cpu?
-- cgit v1.1

From bac1e74dba9755128748b872a0f304dad4d198c6 Mon Sep 17 00:00:00 2001
From: David Alan Gilbert
Date: Tue, 24 Aug 2010 20:22:18 +0200
Subject: PM QoS: Fix kzalloc() parameters swapped in pm_qos_power_open()

sparse spotted that the kzalloc() in pm_qos_power_open() in the current
Linus git tree had its parameters swapped.  Fix this.

Signed-off-by: David Alan Gilbert
Acked-by: mark gross
Signed-off-by: Rafael J. Wysocki
---
 kernel/pm_qos_params.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 996a4de..9da439b 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -348,7 +348,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)

 	pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
 	if (pm_qos_class >= 0) {
-		struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req));
+		struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
 		if (!req)
 			return -ENOMEM;

-- cgit v1.1

From 151772dbfad4dbe81721e40f9b3d588ea77bb7aa Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Wed, 25 Aug 2010 11:32:38 +1000
Subject: tracing/trace_stack: Fix stack trace on ppc64

save_stack_trace() stores the instruction pointer, not the function
descriptor.
On ppc64 the trace stack code currently dereferences the instruction
pointer and shows 8 bytes of instructions in our backtraces:

	# cat /sys/kernel/debug/tracing/stack_trace
	        Depth    Size   Location    (26 entries)
	        -----    ----   --------
	  0)     5424     112   0x6000000048000004
	  1)     5312     160   0x60000000ebad01b0
	  2)     5152     160   0x2c23000041c20030
	  3)     4992     240   0x600000007c781b79
	  4)     4752     160   0xe84100284800000c
	  5)     4592     192   0x600000002fa30000
	  6)     4400     256   0x7f1800347b7407e0
	  7)     4144     208   0xe89f0108f87f0070
	  8)     3936     272   0xe84100282fa30000

Since we aren't dealing with function descriptors, use %pS instead of %pF
to fix it:

	# cat /sys/kernel/debug/tracing/stack_trace
	        Depth    Size   Location    (26 entries)
	        -----    ----   --------
	  0)     5424     112   ftrace_call+0x4/0x8
	  1)     5312     160   .current_io_context+0x28/0x74
	  2)     5152     160   .get_io_context+0x48/0xa0
	  3)     4992     240   .cfq_set_request+0x94/0x4c4
	  4)     4752     160   .elv_set_request+0x60/0x84
	  5)     4592     192   .get_request+0x2d4/0x468
	  6)     4400     256   .get_request_wait+0x7c/0x258
	  7)     4144     208   .__make_request+0x49c/0x610
	  8)     3936     272   .generic_make_request+0x390/0x434

Signed-off-by: Anton Blanchard
Cc: rostedt@goodmis.org
Cc: fweisbec@gmail.com
LKML-Reference: <20100825013238.GE28360@kryten>
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_stack.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 056468e..a6b7e0e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -249,7 +249,7 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 {
 	unsigned long addr = stack_dump_trace[i];

-	return seq_printf(m, "%pF\n", (void *)addr);
+	return seq_printf(m, "%pS\n", (void *)addr);
 }

 static void print_disabled(struct seq_file *m)
-- cgit v1.1

From 25cc69ec34a563e943e85b3b68a79a8aac7f076d Mon Sep 17 00:00:00 2001
From: Saravana Kannan
Date: Thu, 26 Aug 2010 20:18:43 +0200
Subject: PM QoS: Fix inline documentation.

Fix the pm_qos_add_request() kerneldoc comment that doesn't reflect the
behavior of the function after the last PM QoS update.

Signed-off-by: Saravana Kannan
Acked-by: mark gross
Signed-off-by: Rafael J. Wysocki
---
 kernel/pm_qos_params.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'kernel')

diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 9da439b..b7e4c36 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);

 /**
  * pm_qos_add_request - inserts new qos request into the list
- * @pm_qos_class: identifies which list of qos request to us
+ * @dep: pointer to a preallocated handle
+ * @pm_qos_class: identifies which list of qos request to use
  * @value: defines the qos request
  *
  * This function inserts a new entry in the pm_qos_class list of requested qos
  * performance characteristics.  It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters, and returns the pm_qos_request list
- * element as a handle for use in updating and removal.  Call needs to save
- * this handle for later use.
+ * for the pm_qos_class of parameters and initializes the pm_qos_request_list
+ * handle.  Caller needs to save this handle for later use in updates and
+ * removal.
  */
+
 void pm_qos_add_request(struct pm_qos_request_list *dep,
 			int pm_qos_class, s32 value)
 {
-- cgit v1.1
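To make the documented calling convention concrete, a hedged usage sketch
follows; the driver structure and the latency value are invented, and only
the preallocated-handle calls shown in the patch context above are assumed:

	#include <linux/pm_qos_params.h>

	/* Hypothetical driver context embedding the caller-owned handle. */
	struct foo_device {
		struct pm_qos_request_list qos_req;	/* saved for update/removal */
	};

	static void foo_start_latency_sensitive_io(struct foo_device *foo)
	{
		/* The handle is initialized in place, not returned. */
		pm_qos_add_request(&foo->qos_req, PM_QOS_CPU_DMA_LATENCY,
				   50 /* usec, illustrative value */);
	}

	static void foo_stop_latency_sensitive_io(struct foo_device *foo)
	{
		/* Later removal (or update) operates on the same saved handle. */
		pm_qos_remove_request(&foo->qos_req);
	}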