author     Catalin Marinas <catalin.marinas@arm.com>    2009-02-11 13:12:56 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2009-02-12 10:59:43 +0000
commit     3d1228ead618b88e8606015cbabc49019981805d (patch)
tree       5d44c936c12ff2e99732f535dd393af201e7fe67 /arch
parent     f373e8c0639f1720d2d0fe414990f504e113c2ba (diff)
[ARM] 5387/1: Add ptrace VFP support on ARM
This patch adds ptrace support for setting and getting the VFP registers
using PTRACE_SETVFPREGS and PTRACE_GETVFPREGS. The user_vfp structure
defined in asm/user.h contains 32 double registers (to cover VFPv3 and
Neon hardware) and the FPSCR register.

Cc: Paul Brook <paul@codesourcery.com>
Cc: Daniel Jacobowitz <dan@codesourcery.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
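The snippet below is an illustrative sketch, not part of the patch: one way a
debugger might read a stopped tracee's VFP registers through the new request.
It assumes the updated asm/ptrace.h and asm/user.h headers are installed, that
the child is already being traced and is stopped, and that the helper name
dump_vfp is purely hypothetical.

/*
 * Sketch only: read the VFP state of a traced, stopped child.
 * The fallback define mirrors the request value added by this patch.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/user.h>		/* struct user_vfp (after this patch) */

#ifndef PTRACE_GETVFPREGS
#define PTRACE_GETVFPREGS 27
#endif

static int dump_vfp(pid_t pid)
{
	struct user_vfp vfp;
	int i;

	/* addr is unused for this request; data points to a user_vfp */
	if (ptrace(PTRACE_GETVFPREGS, pid, NULL, &vfp) == -1) {
		perror("PTRACE_GETVFPREGS");
		return -1;
	}

	for (i = 0; i < 32; i++)
		printf("d%-2d   = 0x%016llx\n", i, vfp.fpregs[i]);
	printf("fpscr = 0x%08lx\n", vfp.fpscr);

	return 0;
}

Writing the registers back works the same way with PTRACE_SETVFPREGS, passing
a filled-in struct user_vfp as the data argument.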
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/ptrace.h       |  2
-rw-r--r--  arch/arm/include/asm/thread_info.h  |  2
-rw-r--r--  arch/arm/include/asm/user.h         |  9
-rw-r--r--  arch/arm/kernel/ptrace.c            | 58
-rw-r--r--  arch/arm/vfp/vfp.h                  |  2
-rw-r--r--  arch/arm/vfp/vfphw.S                |  2
-rw-r--r--  arch/arm/vfp/vfpmodule.c            | 49
7 files changed, 120 insertions, 4 deletions
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 7319261..236a06b 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -27,6 +27,8 @@
/* PTRACE_SYSCALL is 24 */
#define PTRACE_GETCRUNCHREGS 25
#define PTRACE_SETCRUNCHREGS 26
+#define PTRACE_GETVFPREGS 27
+#define PTRACE_SETVFPREGS 28
/*
* PSR bits
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 68b9ec8..b9dc8a8 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -113,6 +113,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);
+extern void vfp_sync_state(struct thread_info *thread);
+
#endif
/*
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index 825c1e7..df95e05 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -81,4 +81,13 @@ struct user{
#define HOST_TEXT_START_ADDR (u.start_code)
#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+/*
+ * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
+ * are ignored by the ptrace system call.
+ */
+struct user_vfp {
+ unsigned long long fpregs[32];
+ unsigned long fpscr;
+};
+
#endif /* _ARM_USER_H */
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index df653ea..89882a1 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -653,6 +653,54 @@ static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
}
#endif
+#ifdef CONFIG_VFP
+/*
+ * Get the child VFP state.
+ */
+static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data)
+{
+ struct thread_info *thread = task_thread_info(tsk);
+ union vfp_state *vfp = &thread->vfpstate;
+ struct user_vfp __user *ufp = data;
+
+ vfp_sync_state(thread);
+
+ /* copy the floating point registers */
+ if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs,
+ sizeof(vfp->hard.fpregs)))
+ return -EFAULT;
+
+ /* copy the status and control register */
+ if (put_user(vfp->hard.fpscr, &ufp->fpscr))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Set the child VFP state.
+ */
+static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
+{
+ struct thread_info *thread = task_thread_info(tsk);
+ union vfp_state *vfp = &thread->vfpstate;
+ struct user_vfp __user *ufp = data;
+
+ vfp_sync_state(thread);
+
+ /* copy the floating point registers */
+ if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs,
+ sizeof(vfp->hard.fpregs)))
+ return -EFAULT;
+
+ /* copy the status and control register */
+ if (get_user(vfp->hard.fpscr, &ufp->fpscr))
+ return -EFAULT;
+
+ return 0;
+}
+#endif
+
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int ret;
@@ -775,6 +823,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
#endif
+#ifdef CONFIG_VFP
+ case PTRACE_GETVFPREGS:
+ ret = ptrace_getvfpregs(child, (void __user *)data);
+ break;
+
+ case PTRACE_SETVFPREGS:
+ ret = ptrace_setvfpregs(child, (void __user *)data);
+ break;
+#endif
+
default:
ret = ptrace_request(child, request, addr, data);
break;
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index 8de86e4..c8c98dd4 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -377,6 +377,4 @@ struct op {
u32 flags;
};
-#if defined(CONFIG_SMP) || defined(CONFIG_PM)
extern void vfp_save_state(void *location, u32 fpexc);
-#endif
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index c92a08b..a5a4e57 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -172,7 +172,6 @@ process_exception:
@ retry the faulted instruction
ENDPROC(vfp_support_entry)
-#if defined(CONFIG_SMP) || defined(CONFIG_PM)
ENTRY(vfp_save_state)
@ Save the current VFP state
@ r0 - save location
@@ -190,7 +189,6 @@ ENTRY(vfp_save_state)
stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
mov pc, lr
ENDPROC(vfp_save_state)
-#endif
last_VFP_context_address:
.word last_VFP_context
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 9f476a1..7e12390 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -377,6 +377,55 @@ static void vfp_pm_init(void)
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
+/*
+ * Synchronise the hardware VFP state of a thread other than current with the
+ * saved one. This function is used by the ptrace mechanism.
+ */
+#ifdef CONFIG_SMP
+void vfp_sync_state(struct thread_info *thread)
+{
+ /*
+ * On SMP systems, the VFP state is automatically saved at every
+ * context switch. We mark the thread VFP state as belonging to a
+ * non-existent CPU so that the saved one will be reloaded when
+ * needed.
+ */
+ thread->vfpstate.hard.cpu = NR_CPUS;
+}
+#else
+void vfp_sync_state(struct thread_info *thread)
+{
+ unsigned int cpu = get_cpu();
+ u32 fpexc = fmrx(FPEXC);
+
+ /*
+ * If VFP is enabled, the previous state was already saved and
+ * last_VFP_context updated.
+ */
+ if (fpexc & FPEXC_EN)
+ goto out;
+
+ if (!last_VFP_context[cpu])
+ goto out;
+
+ /*
+ * Save the last VFP state on this CPU.
+ */
+ fmxr(FPEXC, fpexc | FPEXC_EN);
+ vfp_save_state(last_VFP_context[cpu], fpexc);
+ fmxr(FPEXC, fpexc);
+
+ /*
+ * Set the context to NULL to force a reload the next time the thread
+ * uses the VFP.
+ */
+ last_VFP_context[cpu] = NULL;
+
+out:
+ put_cpu();
+}
+#endif
+
#include <linux/smp.h>
/*