From e7a510f92c1e482a7db05afd3cb84af1f4cfe0bc Mon Sep 17 00:00:00 2001
From: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Date: Mon, 7 Nov 2005 01:00:12 -0800
Subject: [PATCH] Kprobes: Track kprobe on a per_cpu basis - x86_64 changes

x86_64 changes to track kprobe execution on a per-cpu basis.  We now track the
kprobe state machine independently on each cpu, using an arch-specific kprobe
control block.
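
As a sketch of the intended usage (illustrative only: the DEFINE_PER_CPU
declaration, the get_kprobe_ctlblk() accessor and the save_previous_kprobe()
helper shown here belong to the generic and arch kprobes code, not to this
header change):

	#include <linux/percpu.h>
	#include <linux/kprobes.h>

	/* one control block per cpu; kprobe handlers run with preemption
	 * disabled, so the current cpu's block needs no extra locking */
	static DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

	/* illustrative accessor: return the current cpu's control block */
	static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
	{
		return &__get_cpu_var(kprobe_ctlblk);
	}

	/* state that used to live in globals is now saved per cpu, e.g.
	 * when a probe fires while another probe is being handled */
	static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
	{
		kcb->prev_kprobe.kp = kprobe_running();
		kcb->prev_kprobe.status = kcb->kprobe_status;
		kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
		kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
	}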

Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 include/asm-x86_64/kprobes.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index 6d6d883..4dd7a7e 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -25,6 +25,7 @@
  */
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/percpu.h>
 
 struct pt_regs;
 
@@ -48,6 +49,24 @@ struct arch_specific_insn {
 	kprobe_opcode_t *insn;
 };
 
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long old_rflags;
+	unsigned long saved_rflags;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_old_rflags;
+	unsigned long kprobe_saved_rflags;
+	long *jprobe_saved_rsp;
+	struct pt_regs jprobe_saved_regs;
+	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+	struct prev_kprobe prev_kprobe;
+};
+
 /* trap3/1 are intr gates for kprobes.  So, restore the status of IF,
  * if necessary, before executing the original int3/1 (trap) handler.
  */
-- 
cgit v1.1