Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/bios32.c          1
-rw-r--r--  arch/arm/kernel/entry-armv.S     24
-rw-r--r--  arch/arm/kernel/entry-common.S    2
-rw-r--r--  arch/arm/kernel/irq.c             8
-rw-r--r--  arch/arm/kernel/iwmmxt.S          4
-rw-r--r--  arch/arm/kernel/process.c        85
-rw-r--r--  arch/arm/kernel/signal.c        228
-rw-r--r--  arch/arm/kernel/time.c           24
8 files changed, 181 insertions, 195 deletions
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index de606df..302fc14 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -702,7 +702,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
/*
* Mark this as IO
*/
- vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vma->vm_start, phys,
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index ab8e600..86c9252 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -20,6 +20,7 @@
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>
+#include <asm/thread_notify.h>
#include "entry-header.S"
@@ -560,10 +561,8 @@ ENTRY(__switch_to)
add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE]
stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
-#ifndef CONFIG_MMU
- add r2, r2, #TI_CPU_DOMAIN
-#else
- ldr r6, [r2, #TI_CPU_DOMAIN]!
+#ifdef CONFIG_MMU
+ ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
@@ -585,21 +584,20 @@ ENTRY(__switch_to)
#ifdef CONFIG_MMU
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
-#ifdef CONFIG_VFP
- @ Always disable VFP so we can lazily save/restore the old
- @ state. This occurs in the context of the previous thread.
- VFPFMRX r4, FPEXC
- bic r4, r4, #FPEXC_ENABLE
- VFPFMXR FPEXC, r4
-#endif
#if defined(CONFIG_IWMMXT)
bl iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
- add r4, r2, #40 @ cpu_context_save->extra
+ add r4, r2, #TI_CPU_DOMAIN + 40 @ cpu_context_save->extra
ldmib r4, {r4, r5}
mar acc0, r4, r5
#endif
- ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+ mov r5, r0
+ add r4, r2, #TI_CPU_SAVE
+ ldr r0, =thread_notify_head
+ mov r1, #THREAD_NOTIFY_SWITCH
+ bl atomic_notifier_call_chain
+ mov r0, r5
+ ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
__INIT
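
The rewritten __switch_to tail above drops the open-coded VFP disable and the hard-coded cpu_context_save offset in favour of a call out to the new thread notifier chain before the next task's registers are reloaded. As a rough C-level sketch of what the assembly now does (an assumption worth stating: the notifier data argument is the incoming task's thread_info, which is what the register usage in the hunk suggests; the function below is illustrative, the real code stays in assembly):

    /*
     * Illustrative C equivalent of the new __switch_to tail; not part of
     * the patch.  thread_notify_head and THREAD_NOTIFY_SWITCH come from
     * the <asm/thread_notify.h> header this diff starts including.
     */
    #include <linux/notifier.h>
    #include <asm/thread_info.h>
    #include <asm/thread_notify.h>

    static void switch_to_tail(struct thread_info *next)
    {
        /* r0 (the previous task_struct) is preserved around this call */
        atomic_notifier_call_chain(&thread_notify_head,
                                   THREAD_NOTIFY_SWITCH, next);
        /* ...then the registers saved at next + TI_CPU_SAVE are reloaded */
    }
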
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index dbcb11a..b5bcebc 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -271,7 +271,7 @@ ENTRY(sys_call_table)
@ r8 = syscall table
.type sys_syscall, #function
sys_syscall:
- eor scno, r0, #__NR_OABI_SYSCALL_BASE
+ bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
cmpne scno, #NR_syscalls @ check range
stmloia sp, {r5, r6} @ shuffle args
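
The eor-to-bic change in sys_syscall matters once EABI callers, whose syscall numbers carry no OABI base, can reach this path: exclusive-or with __NR_OABI_SYSCALL_BASE strips the base from an OABI number but would set those bits on an EABI number, whereas bic (and-not) clears them harmlessly in either case. A small stand-alone illustration of the arithmetic, assuming the historical 0x900000 value for the OABI base:

    #include <stdio.h>

    int main(void)
    {
        unsigned long base    = 0x900000; /* assumed __NR_OABI_SYSCALL_BASE */
        unsigned long oabi_nr = base | 4; /* an OABI-encoded syscall number */
        unsigned long eabi_nr = 4;        /* the same call, EABI-encoded */

        /* old code: eor -- fine for OABI, corrupts an EABI number */
        printf("eor: oabi -> %#lx, eabi -> %#lx\n",
               oabi_nr ^ base, eabi_nr ^ base);

        /* new code: bic (and-not) -- correct for both encodings */
        printf("bic: oabi -> %#lx, eabi -> %#lx\n",
               oabi_nr & ~base, eabi_nr & ~base);
        return 0;
    }
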
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2d5896b..ec20f89 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -52,7 +52,7 @@
*/
#define MAX_IRQ_CNT 100000
-static int noirqdebug;
+static int noirqdebug __read_mostly;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);
@@ -81,7 +81,7 @@ irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
- irq_err_count += 1;
+ irq_err_count++;
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
@@ -342,10 +342,10 @@ __do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
#ifdef CONFIG_NO_IDLE_HZ
if (!(action->flags & SA_TIMER) && system_timer->dyn_tick != NULL) {
- write_seqlock(&xtime_lock);
+ spin_lock(&system_timer->dyn_tick->lock);
if (system_timer->dyn_tick->state & DYN_TICK_ENABLED)
system_timer->dyn_tick->handler(irq, 0, regs);
- write_sequnlock(&xtime_lock);
+ spin_unlock(&system_timer->dyn_tick->lock);
}
#endif
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 24c7b04..a3bae95 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -273,7 +273,7 @@ ENTRY(iwmmxt_task_restore)
*
* r0 = previous task_struct pointer (must be preserved)
* r1 = previous thread_info pointer
- * r2 = next thread_info.cpu_domain pointer (must be preserved)
+ * r2 = next thread_info pointer (must be preserved)
*
* Called only from __switch_to with task preemption disabled.
* No need to care about preserving r4 and above.
@@ -285,7 +285,7 @@ ENTRY(iwmmxt_task_switch)
bne 1f @ yes: block them for next task
ldr r5, =concan_owner
- add r6, r2, #(TI_IWMMXT_STATE - TI_CPU_DOMAIN) @ get next task Concan save area
+ add r6, r2, #TI_IWMMXT_STATE @ get next task Concan save area
ldr r5, [r5] @ get current Concan owner
teq r5, r6 @ next task owns it?
movne pc, lr @ no: leave Concan disabled
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 7df6e1a..e1c77ee 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -28,10 +28,12 @@
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
+#include <linux/pm.h>
#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
+#include <asm/thread_notify.h>
#include <asm/uaccess.h>
#include <asm/mach/time.h>
@@ -71,8 +73,36 @@ static int __init hlt_setup(char *__unused)
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
+void arm_machine_restart(char mode)
+{
+ /*
+ * Clean and disable cache, and turn off interrupts
+ */
+ cpu_proc_fin();
+
+ /*
+ * Tell the mm system that we are going to reboot -
+ * we may need it to insert some 1:1 mappings so that
+ * soft boot works.
+ */
+ setup_mm_for_reboot(mode);
+
+ /*
+ * Now call the architecture specific reboot code.
+ */
+ arch_reset(mode);
+
+ /*
+ * Whoops - the architecture was unable to reboot.
+ * Tell the user!
+ */
+ mdelay(1000);
+ printk("Reboot failed -- System halted\n");
+ while (1);
+}
+
/*
- * The following aren't currently used.
+ * Function pointers to optional machine specific functions
*/
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
@@ -80,6 +110,10 @@ EXPORT_SYMBOL(pm_idle);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
+void (*arm_pm_restart)(char str) = arm_machine_restart;
+EXPORT_SYMBOL_GPL(arm_pm_restart);
+
+
/*
* This is our default idle handler. We need to disable
* interrupts here to ensure we don't miss a wakeup call.
@@ -151,33 +185,9 @@ void machine_power_off(void)
pm_power_off();
}
-
void machine_restart(char * __unused)
{
- /*
- * Clean and disable cache, and turn off interrupts
- */
- cpu_proc_fin();
-
- /*
- * Tell the mm system that we are going to reboot -
- * we may need it to insert some 1:1 mappings so that
- * soft boot works.
- */
- setup_mm_for_reboot(reboot_mode);
-
- /*
- * Now call the architecture specific reboot code.
- */
- arch_reset(reboot_mode);
-
- /*
- * Whoops - the architecture was unable to reboot.
- * Tell the user!
- */
- mdelay(1000);
- printk("Reboot failed -- System halted\n");
- while (1);
+ arm_pm_restart(reboot_mode);
}
void __show_regs(struct pt_regs *regs)
@@ -329,13 +339,9 @@ void exit_thread(void)
{
}
-static void default_fp_init(union fp_state *fp)
-{
- memset(fp, 0, sizeof(union fp_state));
-}
+ATOMIC_NOTIFIER_HEAD(thread_notify_head);
-void (*fp_init)(union fp_state *) = default_fp_init;
-EXPORT_SYMBOL(fp_init);
+EXPORT_SYMBOL_GPL(thread_notify_head);
void flush_thread(void)
{
@@ -344,22 +350,21 @@ void flush_thread(void)
memset(thread->used_cp, 0, sizeof(thread->used_cp));
memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+ memset(&thread->fpstate, 0, sizeof(union fp_state));
+
+ thread_notify(THREAD_NOTIFY_FLUSH, thread);
#if defined(CONFIG_IWMMXT)
iwmmxt_task_release(thread);
#endif
- fp_init(&thread->fpstate);
-#if defined(CONFIG_VFP)
- vfp_flush_thread(&thread->vfpstate);
-#endif
}
void release_thread(struct task_struct *dead_task)
{
-#if defined(CONFIG_VFP)
- vfp_release_thread(&task_thread_info(dead_task)->vfpstate);
-#endif
+ struct thread_info *thread = task_thread_info(dead_task);
+
+ thread_notify(THREAD_NOTIFY_RELEASE, thread);
#if defined(CONFIG_IWMMXT)
- iwmmxt_task_release(task_thread_info(dead_task));
+ iwmmxt_task_release(thread);
#endif
}
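
With fp_init and the direct VFP/iWMMXt calls gone, flush_thread and release_thread now simply broadcast THREAD_NOTIFY_FLUSH and THREAD_NOTIFY_RELEASE on thread_notify_head, and interested coprocessor code registers itself instead of being called by name. A hypothetical subscriber sketch follows; the callback and init function names are illustrative, and only thread_notify_head, the THREAD_NOTIFY_* events and the stock atomic notifier API are taken from the patch or existing kernel headers:

    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <asm/thread_info.h>
    #include <asm/thread_notify.h>

    static int cp_thread_notify(struct notifier_block *self,
                                unsigned long cmd, void *t)
    {
        struct thread_info *thread = t;   /* data argument is a thread_info */

        switch (cmd) {
        case THREAD_NOTIFY_FLUSH:
            /* reset the per-thread coprocessor state held in 'thread' */
            break;
        case THREAD_NOTIFY_RELEASE:
            /* drop any lazily-held hardware state of the dead thread */
            break;
        case THREAD_NOTIFY_SWITCH:
            /* e.g. disable the coprocessor so its state is saved lazily */
            break;
        }
        return NOTIFY_DONE;
    }

    static struct notifier_block cp_thread_notifier = {
        .notifier_call = cp_thread_notify,
    };

    static int __init cp_thread_notify_init(void)
    {
        /* called from the driver's own initialisation path */
        return atomic_notifier_chain_register(&thread_notify_head,
                                              &cp_thread_notifier);
    }
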
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index a0cd0a9..1ce05ec 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -134,17 +134,6 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
#ifdef CONFIG_IWMMXT
-/* iwmmxt_area is 0x98 bytes long, preceeded by 8 bytes of signature */
-#define IWMMXT_STORAGE_SIZE (0x98 + 8)
-#define IWMMXT_MAGIC0 0x12ef842a
-#define IWMMXT_MAGIC1 0x1c07ca71
-
-struct iwmmxt_sigframe {
- unsigned long magic0;
- unsigned long magic1;
- unsigned long storage[0x98/4];
-};
-
static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
char kbuf[sizeof(*frame) + 8];
@@ -152,8 +141,8 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
/* the iWMMXt context must be 64 bit aligned */
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
- kframe->magic0 = IWMMXT_MAGIC0;
- kframe->magic1 = IWMMXT_MAGIC1;
+ kframe->magic = IWMMXT_MAGIC;
+ kframe->size = IWMMXT_STORAGE_SIZE;
iwmmxt_task_copy(current_thread_info(), &kframe->storage);
return __copy_to_user(frame, kframe, sizeof(*frame));
}
@@ -167,8 +156,8 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
if (__copy_from_user(kframe, frame, sizeof(*frame)))
return -1;
- if (kframe->magic0 != IWMMXT_MAGIC0 ||
- kframe->magic1 != IWMMXT_MAGIC1)
+ if (kframe->magic != IWMMXT_MAGIC ||
+ kframe->size != IWMMXT_STORAGE_SIZE)
return -1;
iwmmxt_task_restore(current_thread_info(), &kframe->storage);
return 0;
@@ -177,70 +166,61 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
#endif
/*
- * Auxiliary signal frame. This saves stuff like FP state.
- * The layout of this structure is not part of the user ABI.
- */
-struct aux_sigframe {
-#ifdef CONFIG_IWMMXT
- struct iwmmxt_sigframe iwmmxt;
-#endif
-#ifdef CONFIG_VFP
- union vfp_state vfp;
-#endif
-};
-
-/*
* Do a signal return; undo the signal stack. These are aligned to 64-bit.
*/
struct sigframe {
- struct sigcontext sc;
- unsigned long extramask[_NSIG_WORDS-1];
+ struct ucontext uc;
unsigned long retcode[2];
- struct aux_sigframe aux __attribute__((aligned(8)));
};
struct rt_sigframe {
- struct siginfo __user *pinfo;
- void __user *puc;
struct siginfo info;
- struct ucontext uc;
- unsigned long retcode[2];
- struct aux_sigframe aux __attribute__((aligned(8)));
+ struct sigframe sig;
};
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
- struct aux_sigframe __user *aux)
+static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
- int err = 0;
+ struct aux_sigframe __user *aux;
+ sigset_t set;
+ int err;
- __get_user_error(regs->ARM_r0, &sc->arm_r0, err);
- __get_user_error(regs->ARM_r1, &sc->arm_r1, err);
- __get_user_error(regs->ARM_r2, &sc->arm_r2, err);
- __get_user_error(regs->ARM_r3, &sc->arm_r3, err);
- __get_user_error(regs->ARM_r4, &sc->arm_r4, err);
- __get_user_error(regs->ARM_r5, &sc->arm_r5, err);
- __get_user_error(regs->ARM_r6, &sc->arm_r6, err);
- __get_user_error(regs->ARM_r7, &sc->arm_r7, err);
- __get_user_error(regs->ARM_r8, &sc->arm_r8, err);
- __get_user_error(regs->ARM_r9, &sc->arm_r9, err);
- __get_user_error(regs->ARM_r10, &sc->arm_r10, err);
- __get_user_error(regs->ARM_fp, &sc->arm_fp, err);
- __get_user_error(regs->ARM_ip, &sc->arm_ip, err);
- __get_user_error(regs->ARM_sp, &sc->arm_sp, err);
- __get_user_error(regs->ARM_lr, &sc->arm_lr, err);
- __get_user_error(regs->ARM_pc, &sc->arm_pc, err);
- __get_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (err == 0) {
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+
+ __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+ __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+ __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+ __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+ __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+ __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+ __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+ __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+ __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+ __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+ __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+ __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+ __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+ __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+ __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+ __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+ __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
err |= !valid_user_regs(regs);
+ aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_IWMMXT
if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
// if (err == 0)
-// err |= vfp_restore_state(&aux->vfp);
+// err |= vfp_restore_state(&sf->aux.vfp);
#endif
return err;
@@ -249,7 +229,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
struct sigframe __user *frame;
- sigset_t set;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -266,19 +245,8 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
goto badframe;
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_NSIG_WORDS > 1
- && __copy_from_user(&set.sig[1], &frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- if (restore_sigcontext(regs, &frame->sc, &frame->aux))
+ if (restore_sigframe(regs, frame))
goto badframe;
/* Send SIGTRAP if we're single-stepping */
@@ -297,7 +265,6 @@ badframe:
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
- sigset_t set;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -314,19 +281,11 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &frame->aux))
+ if (restore_sigframe(regs, &frame->sig))
goto badframe;
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
+ if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
goto badframe;
/* Send SIGTRAP if we're single-stepping */
@@ -343,42 +302,46 @@ badframe:
}
static int
-setup_sigcontext(struct sigcontext __user *sc, struct aux_sigframe __user *aux,
- struct pt_regs *regs, unsigned long mask)
+setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
+ struct aux_sigframe __user *aux;
int err = 0;
- __put_user_error(regs->ARM_r0, &sc->arm_r0, err);
- __put_user_error(regs->ARM_r1, &sc->arm_r1, err);
- __put_user_error(regs->ARM_r2, &sc->arm_r2, err);
- __put_user_error(regs->ARM_r3, &sc->arm_r3, err);
- __put_user_error(regs->ARM_r4, &sc->arm_r4, err);
- __put_user_error(regs->ARM_r5, &sc->arm_r5, err);
- __put_user_error(regs->ARM_r6, &sc->arm_r6, err);
- __put_user_error(regs->ARM_r7, &sc->arm_r7, err);
- __put_user_error(regs->ARM_r8, &sc->arm_r8, err);
- __put_user_error(regs->ARM_r9, &sc->arm_r9, err);
- __put_user_error(regs->ARM_r10, &sc->arm_r10, err);
- __put_user_error(regs->ARM_fp, &sc->arm_fp, err);
- __put_user_error(regs->ARM_ip, &sc->arm_ip, err);
- __put_user_error(regs->ARM_sp, &sc->arm_sp, err);
- __put_user_error(regs->ARM_lr, &sc->arm_lr, err);
- __put_user_error(regs->ARM_pc, &sc->arm_pc, err);
- __put_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);
-
- __put_user_error(current->thread.trap_no, &sc->trap_no, err);
- __put_user_error(current->thread.error_code, &sc->error_code, err);
- __put_user_error(current->thread.address, &sc->fault_address, err);
- __put_user_error(mask, &sc->oldmask, err);
-
+ __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+ __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+ __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+ __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+ __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+ __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+ __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+ __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+ __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+ __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+ __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+ __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+ __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+ __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+ __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+ __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+ __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+
+ __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
+ __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
+ __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
+ __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+ aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_IWMMXT
if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
// if (err == 0)
-// err |= vfp_save_state(&aux->vfp);
+// err |= vfp_save_state(&sf->aux.vfp);
#endif
+ __put_user_error(0, &aux->end_magic, err);
return err;
}
@@ -487,13 +450,12 @@ setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *reg
if (!frame)
return 1;
- err |= setup_sigcontext(&frame->sc, &frame->aux, regs, set->sig[0]);
-
- if (_NSIG_WORDS > 1) {
- err |= __copy_to_user(frame->extramask, &set->sig[1],
- sizeof(frame->extramask));
- }
+ /*
+ * Set uc.uc_flags to a value which sc.trap_no would never have.
+ */
+ __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+ err |= setup_sigframe(frame, regs, set);
if (err == 0)
err = setup_return(regs, ka, frame->retcode, frame, usig);
@@ -511,25 +473,20 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
if (!frame)
return 1;
- __put_user_error(&frame->info, &frame->pinfo, err);
- __put_user_error(&frame->uc, &frame->puc, err);
err |= copy_siginfo_to_user(&frame->info, info);
- __put_user_error(0, &frame->uc.uc_flags, err);
- __put_user_error(NULL, &frame->uc.uc_link, err);
+ __put_user_error(0, &frame->sig.uc.uc_flags, err);
+ __put_user_error(NULL, &frame->sig.uc.uc_link, err);
memset(&stack, 0, sizeof(stack));
stack.ss_sp = (void __user *)current->sas_ss_sp;
stack.ss_flags = sas_ss_flags(regs->ARM_sp);
stack.ss_size = current->sas_ss_size;
- err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
-
- err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->aux,
- regs, set->sig[0]);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));
+ err |= setup_sigframe(&frame->sig, regs, set);
if (err == 0)
- err = setup_return(regs, ka, frame->retcode, frame, usig);
+ err = setup_return(regs, ka, frame->sig.retcode, frame, usig);
if (err == 0) {
/*
@@ -538,7 +495,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
* -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
*/
regs->ARM_r1 = (unsigned long)&frame->info;
- regs->ARM_r2 = (unsigned long)&frame->uc;
+ regs->ARM_r2 = (unsigned long)&frame->sig.uc;
}
return err;
@@ -665,17 +622,33 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
if (syscall) {
if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
if (thumb_mode(regs)) {
- regs->ARM_r7 = __NR_restart_syscall;
+ regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
regs->ARM_pc -= 2;
} else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+ regs->ARM_r7 = __NR_restart_syscall;
+ regs->ARM_pc -= 4;
+#else
u32 __user *usp;
+ u32 swival = __NR_restart_syscall;
regs->ARM_sp -= 12;
usp = (u32 __user *)regs->ARM_sp;
+ /*
+ * Either we support OABI only, or we have
+ * EABI with the OABI compat layer enabled.
+ * In the latter case we don't know if user
+ * space is EABI or not, and if not we must
+ * not clobber r7. Always using the OABI
+ * syscall solves that issue and works for
+ * all those cases.
+ */
+ swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE;
+
put_user(regs->ARM_pc, &usp[0]);
/* swi __NR_restart_syscall */
- put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
+ put_user(0xef000000 | swival, &usp[1]);
/* ldr pc, [sp], #12 */
put_user(0xe49df00c, &usp[2]);
@@ -683,6 +656,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
(unsigned long)(usp + 3));
regs->ARM_pc = regs->ARM_sp + 4;
+#endif
}
}
if (regs->ARM_r0 == -ERESTARTNOHAND ||
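
The long comment in the restart path above explains why, with the OABI compat layer enabled, the injected stub always uses the OABI swi encoding: the kernel cannot tell whether the interrupted user code is EABI, and the OABI form works for both. A compact worked sketch of that encoding arithmetic (the 0x900000 base and the restart_syscall offset of 0 are era-specific values quoted here only for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned long syscall_base      = 0x900000; /* assumed __NR_SYSCALL_BASE (OABI build) */
        unsigned long oabi_syscall_base = 0x900000; /* assumed __NR_OABI_SYSCALL_BASE */
        unsigned long restart_syscall   = syscall_base + 0; /* assumed restart_syscall offset */

        /* mirrors: swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE */
        unsigned long swival = restart_syscall - syscall_base + oabi_syscall_base;

        /* mirrors: put_user(0xef000000 | swival, &usp[1]), i.e. the "swi" word */
        printf("restart stub swi word: 0x%08lx\n", 0xef000000 | swival);
        return 0;
    }
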
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index d6bd435..9c12d4f 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -379,7 +379,7 @@ static int timer_dyn_tick_enable(void)
int ret = -ENODEV;
if (dyn_tick) {
- write_seqlock_irqsave(&xtime_lock, flags);
+ spin_lock_irqsave(&dyn_tick->lock, flags);
ret = 0;
if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
ret = dyn_tick->enable();
@@ -387,7 +387,7 @@ static int timer_dyn_tick_enable(void)
if (ret == 0)
dyn_tick->state |= DYN_TICK_ENABLED;
}
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ spin_unlock_irqrestore(&dyn_tick->lock, flags);
}
return ret;
@@ -400,7 +400,7 @@ static int timer_dyn_tick_disable(void)
int ret = -ENODEV;
if (dyn_tick) {
- write_seqlock_irqsave(&xtime_lock, flags);
+ spin_lock_irqsave(&dyn_tick->lock, flags);
ret = 0;
if (dyn_tick->state & DYN_TICK_ENABLED) {
ret = dyn_tick->disable();
@@ -408,7 +408,7 @@ static int timer_dyn_tick_disable(void)
if (ret == 0)
dyn_tick->state &= ~DYN_TICK_ENABLED;
}
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ spin_unlock_irqrestore(&dyn_tick->lock, flags);
}
return ret;
@@ -422,15 +422,20 @@ static int timer_dyn_tick_disable(void)
void timer_dyn_reprogram(void)
{
struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
- unsigned long next, seq;
+ unsigned long next, seq, flags;
- if (dyn_tick && (dyn_tick->state & DYN_TICK_ENABLED)) {
+ if (!dyn_tick)
+ return;
+
+ spin_lock_irqsave(&dyn_tick->lock, flags);
+ if (dyn_tick->state & DYN_TICK_ENABLED) {
next = next_timer_interrupt();
do {
seq = read_seqbegin(&xtime_lock);
- dyn_tick->reprogram(next_timer_interrupt() - jiffies);
+ dyn_tick->reprogram(next - jiffies);
} while (read_seqretry(&xtime_lock, seq));
}
+ spin_unlock_irqrestore(&dyn_tick->lock, flags);
}
static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
@@ -499,5 +504,10 @@ void __init time_init(void)
if (system_timer->offset == NULL)
system_timer->offset = dummy_gettimeoffset;
system_timer->init();
+
+#ifdef CONFIG_NO_IDLE_HZ
+ if (system_timer->dyn_tick)
+ system_timer->dyn_tick->lock = SPIN_LOCK_UNLOCKED;
+#endif
}
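
The dyn_tick paths in irq.c and time.c now serialise on a spinlock embedded in the dyn_tick_timer itself instead of taking xtime_lock, and time_init() initialises that lock whenever a platform supplies a dyn_tick implementation. A hypothetical platform-side sketch, assuming only the fields this diff actually references (lock, state, enable, disable, reprogram, handler) and prototypes inferred from how they are called; the my_* names are illustrative:

    #include <asm/ptrace.h>
    #include <asm/mach/time.h>

    static int my_dyn_tick_enable(void)
    {
        /* switch the hardware timer to one-shot mode */
        return 0;
    }

    static int my_dyn_tick_disable(void)
    {
        /* return to the periodic HZ tick */
        return 0;
    }

    static void my_dyn_tick_reprogram(unsigned long ticks_ahead)
    {
        /* program the next expiry 'ticks_ahead' jiffies away */
    }

    static int my_dyn_tick_handler(int irq, void *dev_id, struct pt_regs *regs)
    {
        /* catch up on jiffies missed while idling */
        return 0;
    }

    static struct dyn_tick_timer my_dyn_tick = {
        .enable    = my_dyn_tick_enable,
        .disable   = my_dyn_tick_disable,
        .reprogram = my_dyn_tick_reprogram,
        .handler   = my_dyn_tick_handler,
        /* .lock is set to SPIN_LOCK_UNLOCKED by time_init() after this patch */
    };
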