From 3bccf467727c82421e5f7b630c9bb864ebe8d2e6 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 27 Jan 2012 17:49:16 +0900
Subject: sh: cpufreq: percpu struct clk accounting.

At the moment there is simply a global struct clk pointer for the CPU
frequency, which is fundamentally broken in the SMP case. Fix this up
by switching to per-CPU struct clk accounting.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/cpufreq.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

(limited to 'arch/sh/kernel')

diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index 0ffface..8203865 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -3,7 +3,7 @@
  *
  * cpufreq driver for the SuperH processors.
  *
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2012 Paul Mundt
  * Copyright (C) 2002 M. R. Brown
  *
  * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
@@ -24,12 +24,14 @@
 #include
 #include	/* set_cpus_allowed() */
 #include
+#include
+#include
 
-static struct clk *cpuclk;
+static DEFINE_PER_CPU(struct clk, sh_cpuclk);
 
 static unsigned int sh_cpufreq_get(unsigned int cpu)
 {
-	return (clk_get_rate(cpuclk) + 500) / 1000;
+	return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
 }
 
 /*
@@ -40,6 +42,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
 			     unsigned int relation)
 {
 	unsigned int cpu = policy->cpu;
+	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
 	cpumask_t cpus_allowed;
 	struct cpufreq_freqs freqs;
 	long freq;
@@ -77,13 +80,15 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
 
 static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	if (!cpu_online(policy->cpu))
+	unsigned int cpu = policy->cpu;
+	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+
+	if (!cpu_online(cpu))
 		return -ENODEV;
 
 	cpuclk = clk_get(NULL, "cpu_clk");
 	if (IS_ERR(cpuclk)) {
-		printk(KERN_ERR "cpufreq: couldn't get CPU#%d clk\n",
-		       policy->cpu);
+		printk(KERN_ERR "cpufreq: couldn't get CPU#%d clk\n", cpu);
 		return PTR_ERR(cpuclk);
 	}
 
@@ -92,7 +97,7 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 
-	policy->cur = sh_cpufreq_get(policy->cpu);
+	policy->cur = sh_cpufreq_get(cpu);
 	policy->min = policy->cpuinfo.min_freq;
 	policy->max = policy->cpuinfo.max_freq;
 
@@ -102,7 +107,7 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 */
 	if (unlikely(policy->min == policy->max)) {
 		printk(KERN_ERR "cpufreq: clock framework rate rounding "
-		       "not supported on CPU#%d.\n", policy->cpu);
+		       "not supported on CPU#%d.\n", cpu);
 
 		clk_put(cpuclk);
 		return -EINVAL;
@@ -110,7 +115,7 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	printk(KERN_INFO "cpufreq: CPU#%d Frequencies - Minimum %u.%03u MHz, "
 	       "Maximum %u.%03u MHz.\n",
-	       policy->cpu, policy->min / 1000, policy->min % 1000,
+	       cpu, policy->min / 1000, policy->min % 1000,
 	       policy->max / 1000, policy->max % 1000);
 
 	return 0;
@@ -125,6 +130,7 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
 
 static int sh_cpufreq_exit(struct cpufreq_policy *policy)
 {
+	struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
 	clk_put(cpuclk);
 	return 0;
 }
--
cgit v1.1
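For reference, a minimal sketch of the per-CPU accessor pattern this patch moves to; the helper function is made up for illustration, while DEFINE_PER_CPU()/per_cpu() from <linux/percpu.h> and the Hz-to-kHz rounding convention are the ones used in the diff above.

#include <linux/percpu.h>
#include <linux/clk.h>

/* Illustrative only: one struct clk instance per CPU. */
static DEFINE_PER_CPU(struct clk, example_cpuclk);

/* Illustrative helper (not from the tree): resolve this CPU's clk, return kHz. */
static unsigned int example_cpu_khz(unsigned int cpu)
{
	struct clk *cpuclk = &per_cpu(example_cpuclk, cpu);	/* this CPU's instance */

	return (clk_get_rate(cpuclk) + 500) / 1000;	/* Hz -> kHz, rounded */
}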
From ecbef17adbbbe89eb6b3e4d4e5b756d63041319c Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 27 Jan 2012 19:44:49 +0900
Subject: sh: cpufreq: struct device lookup from CPU topology.

The struct device pointer associated with the CPU we're on can be
fetched via the topology information. Tie this in to localize the CPU
clock lookup. While we're at it, tidy up some of the debug/info
printing notices too.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/cpufreq.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

(limited to 'arch/sh/kernel')

diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index 8203865..66dbb74 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -14,6 +14,8 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  */
+#define pr_fmt(fmt) "cpufreq: " fmt
+
 #include
 #include
 #include
@@ -21,6 +23,7 @@
 #include
 #include
 #include
+#include
 #include
 #include	/* set_cpus_allowed() */
 #include
@@ -45,6 +48,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
 	cpumask_t cpus_allowed;
 	struct cpufreq_freqs freqs;
+	struct device *dev;
 	long freq;
 
 	if (!cpu_online(cpu))
@@ -55,13 +59,15 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
 
 	BUG_ON(smp_processor_id() != cpu);
 
+	dev = get_cpu_device(cpu);
+
 	/* Convert target_freq from kHz to Hz */
 	freq = clk_round_rate(cpuclk, target_freq * 1000);
 	if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
 		return -EINVAL;
 
-	pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
+	dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
 
 	freqs.cpu = cpu;
 	freqs.old = sh_cpufreq_get(cpu);
@@ -73,7 +79,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
 	clk_set_rate(cpuclk, freq);
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 
-	pr_debug("cpufreq: set frequency %lu Hz\n", freq);
+	dev_dbg(dev, "set frequency %lu Hz\n", freq);
 
 	return 0;
 }
@@ -82,13 +88,16 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int cpu = policy->cpu;
 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+	struct device *dev;
 
 	if (!cpu_online(cpu))
 		return -ENODEV;
 
-	cpuclk = clk_get(NULL, "cpu_clk");
+	dev = get_cpu_device(cpu);
+
+	cpuclk = clk_get(dev, "cpu_clk");
 	if (IS_ERR(cpuclk)) {
-		printk(KERN_ERR "cpufreq: couldn't get CPU#%d clk\n", cpu);
+		dev_err(dev, "couldn't get CPU clk\n");
 		return PTR_ERR(cpuclk);
 	}
 
@@ -106,16 +115,14 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 * properly to support scaling.
 	 */
 	if (unlikely(policy->min == policy->max)) {
-		printk(KERN_ERR "cpufreq: clock framework rate rounding "
-		       "not supported on CPU#%d.\n", cpu);
-
+		dev_err(dev, "rate rounding not supported on this CPU.\n");
 		clk_put(cpuclk);
 		return -EINVAL;
 	}
 
-	printk(KERN_INFO "cpufreq: CPU#%d Frequencies - Minimum %u.%03u MHz, "
+	dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
 	       "Maximum %u.%03u MHz.\n",
-	       cpu, policy->min / 1000, policy->min % 1000,
+	       policy->min / 1000, policy->min % 1000,
 	       policy->max / 1000, policy->max % 1000);
 
 	return 0;
@@ -147,7 +154,7 @@ static struct cpufreq_driver sh_cpufreq_driver = {
 
 static int __init sh_cpufreq_module_init(void)
 {
-	printk(KERN_INFO "cpufreq: SuperH CPU frequency driver.\n");
+	pr_notice("SuperH CPU frequency driver.\n");
 	return cpufreq_register_driver(&sh_cpufreq_driver);
 }
--
cgit v1.1
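A small sketch of the topology-based device lookup the patch above relies on; get_cpu_device() and the dev_*() printing helpers are the interfaces used in the diff, while the wrapper function itself is hypothetical.

#include <linux/cpu.h>
#include <linux/device.h>

/* Illustrative only: tie diagnostics to the CPU's struct device. */
static int example_announce_cpu(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (!dev)
		return -ENODEV;

	/* messages are now attributed to the cpu device rather than a bare printk */
	dev_dbg(dev, "cpufreq driver attached\n");
	return 0;
}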
From 1bcfc723c8688a257df920999a43bcc2e59d5908 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 27 Jan 2012 20:18:24 +0900
Subject: sh: cpufreq: Support CPU clock frequency table.

This adds support for the frequency table provided by the clock
framework under the struct clk definition (if available). In cases
where no table is generated or otherwise supported, we fall back on
coarse grained scaling via clock framework rounding, as before.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/cpufreq.c | 74 ++++++++++++++++++++++++++++++------------------
 1 file changed, 46 insertions(+), 28 deletions(-)

(limited to 'arch/sh/kernel')

diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index 66dbb74..e0accdc 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -84,10 +84,32 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
 	return 0;
 }
 
+static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
+	struct cpufreq_frequency_table *freq_table;
+
+	freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+	if (freq_table)
+		return cpufreq_frequency_table_verify(policy, freq_table);
+
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+				     policy->cpuinfo.max_freq);
+
+	policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+	policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+				     policy->cpuinfo.max_freq);
+
+	return 0;
+}
+
 static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int cpu = policy->cpu;
 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+	struct cpufreq_frequency_table *freq_table;
 	struct device *dev;
 
 	if (!cpu_online(cpu))
@@ -101,25 +123,24 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		return PTR_ERR(cpuclk);
 	}
 
-	/* cpuinfo and default policy values */
-	policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
-	policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+	policy->cur = policy->min = policy->max = sh_cpufreq_get(cpu);
 
-	policy->cur = sh_cpufreq_get(cpu);
-	policy->min = policy->cpuinfo.min_freq;
-	policy->max = policy->cpuinfo.max_freq;
-
-	/*
-	 * Catch the cases where the clock framework hasn't been wired up
-	 * properly to support scaling.
-	 */
-	if (unlikely(policy->min == policy->max)) {
-		dev_err(dev, "rate rounding not supported on this CPU.\n");
-		clk_put(cpuclk);
-		return -EINVAL;
+	freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+	if (freq_table) {
+		int result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+
+		if (!result)
+			cpufreq_frequency_table_get_attr(freq_table, cpu);
+	} else {
+		policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+		policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
 	}
 
+	policy->min = policy->cpuinfo.min_freq;
+	policy->max = policy->cpuinfo.max_freq;
+
+	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
 	dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
 	       "Maximum %u.%03u MHz.\n",
 	       policy->min / 1000, policy->min % 1000,
@@ -128,28 +149,25 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
-	return 0;
-}
+	unsigned int cpu = policy->cpu;
+	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
 
-static int sh_cpufreq_exit(struct cpufreq_policy *policy)
-{
-	struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
+	cpufreq_frequency_table_put_attr(cpu);
 	clk_put(cpuclk);
+
 	return 0;
 }
 
 static struct cpufreq_driver sh_cpufreq_driver = {
 	.owner		= THIS_MODULE,
 	.name		= "sh",
-	.init		= sh_cpufreq_cpu_init,
-	.verify		= sh_cpufreq_verify,
-	.target		= sh_cpufreq_target,
 	.get		= sh_cpufreq_get,
-	.exit		= sh_cpufreq_exit,
+	.target		= sh_cpufreq_target,
+	.verify		= sh_cpufreq_verify,
+	.init		= sh_cpufreq_cpu_init,
+	.exit		= sh_cpufreq_cpu_exit,
 };
 
 static int __init sh_cpufreq_module_init(void)
--
cgit v1.1

From 1a565cf07fa1be0a6d5cf30e87ee2d204e9753d3 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 27 Jan 2012 20:43:14 +0900
Subject: sh: cpufreq: notify about rate rounding fallback.

The general case for platforms that support the clock framework fully
will be rate table rounding, while others will have to fall back on
much coarser general rate rounding. Notify about it during boot so the
limited functionality for the given subtype is appropriately noted.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/cpufreq.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

(limited to 'arch/sh/kernel')

diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index e0accdc..7bacbed 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -127,13 +127,19 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
 	if (freq_table) {
-		int result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+		int result;
 
+		result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
 		if (!result)
 			cpufreq_frequency_table_get_attr(freq_table, cpu);
 	} else {
-		policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
-		policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+		dev_notice(dev, "no frequency table found, falling back "
+			   "to rate rounding.\n");
+
+		policy->cpuinfo.min_freq =
+			(clk_round_rate(cpuclk, 1) + 500) / 1000;
+		policy->cpuinfo.max_freq =
+			(clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
 	}
 
 	policy->min = policy->cpuinfo.min_freq;
--
cgit v1.1
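A condensed sketch of the table-or-rounding fallback that the two patches above implement; the function and its parameter list are illustrative, while cpufreq_frequency_table_verify(), cpufreq_verify_within_limits() and clk_round_rate() are the calls used in the diffs.

#include <linux/cpufreq.h>
#include <linux/clk.h>

/* Illustrative only: prefer the clock framework's frequency table,
 * otherwise fall back to coarse clk_round_rate() limits. */
static int example_verify(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table,
			  struct clk *cpuclk)
{
	if (freq_table)
		return cpufreq_frequency_table_verify(policy, freq_table);

	/* clamp to whatever the clock can actually be rounded to, in kHz */
	policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
	policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}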
From 3cbb08ae2e6c98f8a73d7319c959e81ca3c54c11 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 27 Jan 2012 20:45:24 +0900
Subject: sh: cpufreq: Wire up scaling_available_freqs support.

scaling_available_freqs is provided generically for drivers that are
using frequency table based rounding. This will be optional for our
case, but the generic code already takes that into consideration, so we
can simply wire it up outright.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/cpufreq.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/sh/kernel')

diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index 7bacbed..e68b45b 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -166,6 +166,11 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 	return 0;
 }
 
+static struct freq_attr *sh_freq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
 static struct cpufreq_driver sh_cpufreq_driver = {
 	.owner		= THIS_MODULE,
 	.name		= "sh",
@@ -174,6 +179,7 @@ static struct cpufreq_driver sh_cpufreq_driver = {
 	.verify		= sh_cpufreq_verify,
 	.init		= sh_cpufreq_cpu_init,
 	.exit		= sh_cpufreq_cpu_exit,
+	.attr		= sh_freq_attr,
 };
 
 static int __init sh_cpufreq_module_init(void)
--
cgit v1.1

From 10a068f27ac2200c83f6d13f5e03f6e48cf06d10 Mon Sep 17 00:00:00 2001
From: "Shimoda, Yoshihiro"
Date: Wed, 7 Mar 2012 14:46:41 +0900
Subject: sh: add platform_device for RSPI in setup-sh7757

Signed-off-by: Yoshihiro Shimoda
Signed-off-by: Paul Mundt
---
 arch/sh/kernel/cpu/sh4a/setup-sh7757.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

(limited to 'arch/sh/kernel')

diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 2875e8b..c8836cf 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -680,6 +680,25 @@ static struct platform_device spi1_device = {
 	.resource	= spi1_resources,
 };
 
+static struct resource rspi_resources[] = {
+	{
+		.start	= 0xfe480000,
+		.end	= 0xfe4800ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= 220,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device rspi_device = {
+	.name		= "rspi",
+	.id		= 2,
+	.num_resources	= ARRAY_SIZE(rspi_resources),
+	.resource	= rspi_resources,
+};
+
 static struct resource usb_ehci_resources[] = {
 	[0] = {
 		.start	= 0xfe4f1000,
@@ -740,6 +759,7 @@ static struct platform_device *sh7757_devices[] __initdata = {
 	&dma3_device,
 	&spi0_device,
 	&spi1_device,
+	&rspi_device,
 	&usb_ehci_device,
 	&usb_ohci_device,
 };
--
cgit v1.1
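For context, a hypothetical probe sketch showing how a driver would pick up the MEM and IRQ resources registered above; platform_get_resource() and platform_get_irq() are the standard platform bus helpers, and the probe function is not taken from the actual rspi driver.

#include <linux/platform_device.h>

/* Illustrative only: consuming the rspi_device resources from a driver. */
static int example_rspi_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	if (!res || irq < 0)
		return -ENODEV;

	/* for the SH7757 instance above: res->start == 0xfe480000, irq == 220 */
	dev_info(&pdev->dev, "RSPI regs %pR, irq %d\n", res, irq);
	return 0;
}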
From 9b2ffa8d048656baf7f5e764d2586ba59927a25b Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 28 Mar 2012 16:20:20 +0900
Subject: sh: Avoid exporting unimplemented syscalls.

Now that userspace is making use of kernel-provided sanitized headers
for working out supported interfaces, we need to be a bit more diligent
with matching the syscall definitions with their actual wiring/support
state.

In theory it shouldn't hurt anything since sys_ni_syscall will
ultimately do the right thing, but there's also not much need to lie
about legacy x86 syscalls that we've never supported. This tightens
things up a bit for uClibc at least.

Suggested-by: Carmelo Amoroso
Acked-by: Mike Frysinger
Signed-off-by: Paul Mundt
---
 arch/sh/kernel/syscalls_32.S | 8 ++++----
 arch/sh/kernel/syscalls_64.S | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch/sh/kernel')

diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index ee56a9b..4b68f0f 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -204,8 +204,8 @@ ENTRY(sys_call_table)
 	.long sys_capset	/* 185 */
 	.long sys_sigaltstack
 	.long sys_sendfile
-	.long sys_ni_syscall	/* streams1 */
-	.long sys_ni_syscall	/* streams2 */
+	.long sys_ni_syscall	/* getpmsg */
+	.long sys_ni_syscall	/* putpmsg */
 	.long sys_vfork		/* 190 */
 	.long sys_getrlimit
 	.long sys_mmap2
@@ -259,8 +259,8 @@ ENTRY(sys_call_table)
 	.long sys_futex		/* 240 */
 	.long sys_sched_setaffinity
 	.long sys_sched_getaffinity
-	.long sys_ni_syscall
-	.long sys_ni_syscall
+	.long sys_ni_syscall	/* reserved for set_thread_area */
+	.long sys_ni_syscall	/* reserved for get_thread_area */
 	.long sys_io_setup	/* 245 */
 	.long sys_io_destroy
 	.long sys_io_getevents
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 9af7de2..0956345 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -208,8 +208,8 @@ sys_call_table:
 	.long sys_capset	/* 185 */
 	.long sys_sigaltstack
 	.long sys_sendfile
-	.long sys_ni_syscall	/* streams1 */
-	.long sys_ni_syscall	/* streams2 */
+	.long sys_ni_syscall	/* getpmsg */
+	.long sys_ni_syscall	/* putpmsg */
 	.long sys_vfork		/* 190 */
 	.long sys_getrlimit
 	.long sys_mmap2
@@ -296,8 +296,8 @@ sys_call_table:
 	.long sys_futex
 	.long sys_sched_setaffinity
 	.long sys_sched_getaffinity	/* 270 */
-	.long sys_ni_syscall
-	.long sys_ni_syscall
+	.long sys_ni_syscall	/* reserved for set_thread_area */
+	.long sys_ni_syscall	/* reserved for get_thread_area */
 	.long sys_io_setup
 	.long sys_io_destroy
 	.long sys_io_getevents	/* 275 */
--
cgit v1.1
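As the commit message notes, every slot that stays unwired still points at sys_ni_syscall, which is essentially just a stub that reports the call as unimplemented; its generic definition (from kernel/sys_ni.c) is reproduced here for reference.

/* What an unimplemented syscall slot resolves to at runtime. */
asmlinkage long sys_ni_syscall(void)
{
	return -ENOSYS;
}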
From 8368b0e0ca5f38f605066fa0c9ea33bbc191e267 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Wed, 28 Mar 2012 15:16:31 -0700
Subject: sh: no need to reset handler if SA_ONESHOT

get_signal_to_deliver() already resets the signal handler if SA_ONESHOT
is set in ka->sa.sa_flags, so there's no need to do it again in
handle_signal(). Furthermore, because we were modifying
ka->sa.sa_handler (which is a copy of sighand->action[]) instead of
sighand->action[], the original code had no effect on signal delivery.

Acked-by: Oleg Nesterov
Cc: Paul Mundt
Signed-off-by: Matt Fleming
Signed-off-by: Andrew Morton
Signed-off-by: Paul Mundt
---
 arch/sh/kernel/signal_32.c | 3 ---
 arch/sh/kernel/signal_64.c | 3 ---
 2 files changed, 6 deletions(-)

(limited to 'arch/sh/kernel')

diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index a7a55ed..1055146 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -548,9 +548,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
 
-	if (ka->sa.sa_flags & SA_ONESHOT)
-		ka->sa.sa_handler = SIG_DFL;
-
 	if (ret == 0) {
 		spin_lock_irq(&current->sighand->siglock);
 		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 6b5603f..7b9278d 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -734,9 +734,6 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
 
-	if (ka->sa.sa_flags & SA_ONESHOT)
-		ka->sa.sa_handler = SIG_DFL;
-
 	if (ret == 0) {
 		spin_lock_irq(&current->sighand->siglock);
 		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
--
cgit v1.1

From 5e047fa159cf40733c627002d0443fddff3183c7 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Wed, 28 Mar 2012 15:16:32 -0700
Subject: sh: use set_current_blocked() and block_sigmask()

As described in e6fa16ab ("signal: sigprocmask() should do
retarget_shared_pending()") the modification of current->blocked is
incorrect as we need to check whether the signal we're about to block
is pending in the shared queue.

Also, use the new helper function introduced in commit 5e6292c0f28f
("signal: add block_sigmask() for adding sigmask to current->blocked")
which centralises the code for updating current->blocked after
successfully delivering a signal and reduces the amount of duplicate
code across architectures. In the past some architectures got this code
wrong, so using this helper function should stop that from happening
again.
Acked-by: Oleg Nesterov
Cc: Paul Mundt
Signed-off-by: Matt Fleming
Signed-off-by: Andrew Morton
Signed-off-by: Paul Mundt
---
 arch/sh/kernel/signal_32.c | 32 ++++++++++----------------------
 arch/sh/kernel/signal_64.c | 37 ++++++++++---------------------------
 2 files changed, 20 insertions(+), 49 deletions(-)

(limited to 'arch/sh/kernel')

diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 1055146..883d711 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -58,12 +58,13 @@ sys_sigsuspend(old_sigset_t mask,
 	       unsigned long r5, unsigned long r6, unsigned long r7,
 	       struct pt_regs __regs)
 {
-	mask &= _BLOCKABLE;
-	spin_lock_irq(&current->sighand->siglock);
+	sigset_t blocked;
+
 	current->saved_sigmask = current->blocked;
-	siginitset(&current->blocked, mask);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+
+	mask &= _BLOCKABLE;
+	siginitset(&blocked, mask);
+	set_current_blocked(&blocked);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -240,11 +241,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->sc, &r0))
 		goto badframe;
@@ -274,10 +271,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
 		goto badframe;
@@ -548,14 +542,8 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
 
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	if (ret == 0)
+		block_sigmask(ka, sig);
 
 	return ret;
 }
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 7b9278d..3c9a6f7 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -159,14 +159,13 @@ sys_sigsuspend(old_sigset_t mask,
 	       unsigned long r6, unsigned long r7,
 	       struct pt_regs * regs)
 {
-	sigset_t saveset;
+	sigset_t saveset, blocked;
 
-	mask &= _BLOCKABLE;
-	spin_lock_irq(&current->sighand->siglock);
 	saveset = current->blocked;
-	siginitset(&current->blocked, mask);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+
+	mask &= _BLOCKABLE;
+	siginitset(&blocked, mask);
+	set_current_blocked(&blocked);
 
 	REF_REG_RET = -EINTR;
 	while (1) {
@@ -198,11 +197,8 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
 	if (copy_from_user(&newset, unewset, sizeof(newset)))
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
 	saveset = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	REF_REG_RET = -EINTR;
 	while (1) {
@@ -408,11 +404,7 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->sc, &ret))
 		goto badframe;
@@ -445,10 +437,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
 		goto badframe;
@@ -734,14 +723,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 	else
 		ret = setup_frame(sig, ka, oldset, regs);
 
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	if (ret == 0)
+		block_sigmask(ka, sig);
 
 	return ret;
 }
--
cgit v1.1
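Taken together, a minimal sketch of the pattern the two signal patches converge on; the wrapper functions are made up for illustration, while set_current_blocked(), block_sigmask(), sigdelsetmask() and the arch-defined _BLOCKABLE mask are the interfaces used in the diffs.

#include <linux/signal.h>
#include <linux/sched.h>

/* Illustrative only: restoring the blocked mask on sigreturn. */
static void example_restore_blocked(sigset_t *set)
{
	/* replaces the open-coded siglock/recalc_sigpending() sequence */
	sigdelsetmask(set, ~_BLOCKABLE);
	set_current_blocked(set);
}

/* Illustrative only: post-delivery bookkeeping in handle_signal(). */
static void example_finish_delivery(struct k_sigaction *ka, int sig, int ret)
{
	if (ret == 0)
		block_sigmask(ka, sig);	/* folds in sa_mask (and sig, unless SA_NODEFER) */
}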