From f984ba4eb575e4a27ed28a76d4126d2aa9233c32 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Fri, 3 Dec 2010 18:54:34 +0900
Subject: kprobes: Use text_poke_smp_batch for unoptimizing

Use text_poke_smp_batch() on the unoptimization path to reduce the
number of stop_machine() invocations. If the number of probes to be
unoptimized exceeds MAX_OPTIMIZE_PROBES (=256), kprobes unoptimizes the
first MAX_OPTIMIZE_PROBES probes and kicks the optimizer again for the
remaining probes.

Signed-off-by: Masami Hiramatsu
Cc: Rusty Russell
Cc: Frederic Weisbecker
Cc: Ananth N Mavinakayanahalli
Cc: Jason Baron
Cc: Mathieu Desnoyers
Cc: 2nddept-manager@sdl.hitachi.co.jp
Cc: Peter Zijlstra
Cc: Steven Rostedt
LKML-Reference: <20101203095434.2961.22657.stgit@ltc236.sdl.hitachi.co.jp>
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes.c |   40 ++++++++++++++++++++++++++++++++++++++++
 include/linux/kprobes.h   |    2 ++
 kernel/kprobes.c          |   10 ++++------
 3 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 25a8af7..5940282b 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1457,6 +1457,46 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
 	text_poke_smp_batch(jump_poke_params, c);
 }
 
+static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
+					      u8 *insn_buf,
+					      struct optimized_kprobe *op)
+{
+	/* Set int3 to first byte for kprobes */
+	insn_buf[0] = BREAKPOINT_INSTRUCTION;
+	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+
+	tprm->addr = op->kp.addr;
+	tprm->opcode = insn_buf;
+	tprm->len = RELATIVEJUMP_SIZE;
+}
+
+/*
+ * Recover original instructions and breakpoints from relative jumps.
+ * Caller must call with locking kprobe_mutex.
+ */
+extern void arch_unoptimize_kprobes(struct list_head *oplist,
+				    struct list_head *done_list)
+{
+	struct optimized_kprobe *op, *tmp;
+	int c = 0;
+
+	list_for_each_entry_safe(op, tmp, oplist, list) {
+		/* Setup param */
+		setup_unoptimize_kprobe(&jump_poke_params[c],
+					jump_poke_bufs[c].buf, op);
+		list_move(&op->list, done_list);
+		if (++c >= MAX_OPTIMIZE_PROBES)
+			break;
+	}
+
+	/*
+	 * text_poke_smp doesn't support NMI/MCE code modifying.
+	 * However, since kprobes itself also doesn't support NMI/MCE
+	 * code probing, it's not a problem.
+	 */
+	text_poke_smp_batch(jump_poke_params, c);
+}
+
 /* Replace a relative jump with a breakpoint (int3). */
 void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
 {
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index fe157ba..b78edb5 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -276,6 +276,8 @@ extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
 extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op);
 extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
 extern void arch_optimize_kprobes(struct list_head *oplist);
+extern void arch_unoptimize_kprobes(struct list_head *oplist,
+				    struct list_head *done_list);
 extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
 extern kprobe_opcode_t *get_optinsn_slot(void);
 extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 531e101..7663e5d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -517,9 +517,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 	/* Ditto to do_optimize_kprobes */
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	list_for_each_entry_safe(op, tmp, &unoptimizing_list, list) {
-		/* Unoptimize kprobes */
-		arch_unoptimize_kprobe(op);
+	arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+	/* Loop free_list for disarming */
+	list_for_each_entry_safe(op, tmp, free_list, list) {
 		/* Disarm probes if marked disabled */
 		if (kprobe_disabled(&op->kp))
 			arch_disarm_kprobe(&op->kp);
@@ -530,8 +530,6 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 			 * (reclaiming is done by do_free_cleaned_kprobes.)
 			 */
 			hlist_del_rcu(&op->kp.hlist);
-			/* Move only unused probes on free_list */
-			list_move(&op->list, free_list);
 		} else
 			list_del_init(&op->list);
 	}
@@ -592,7 +590,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	mutex_unlock(&module_mutex);
 
 	/* Step 5: Kick optimizer again if needed */
-	if (!list_empty(&optimizing_list))
+	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
 	else
 		/* Wake up all waiters */
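
The core idea of the patch is to drain the unoptimizing list in bounded
batches, so that each pass over the list costs only one stop_machine()
call no matter how many probes it covers, and to reschedule the optimizer
whenever a pass could not finish the list. The user-space sketch below
illustrates just that list-draining pattern in isolation; it is not
kernel code, and all names in it (unoptimize_batch, MAX_BATCH,
struct probe) are hypothetical stand-ins for arch_unoptimize_kprobes(),
MAX_OPTIMIZE_PROBES, and struct optimized_kprobe.

	/*
	 * Sketch of the bounded-batch draining pattern from the patch.
	 * Not kernel code: plain singly-linked lists replace list_head,
	 * and a printf stands in for the batched text_poke_smp_batch().
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_BATCH 256	/* stands in for MAX_OPTIMIZE_PROBES */

	struct probe {
		int id;
		struct probe *next;
	};

	/* Move up to MAX_BATCH probes from *pending to *done; return count. */
	static int unoptimize_batch(struct probe **pending, struct probe **done)
	{
		int c = 0;

		while (*pending && c < MAX_BATCH) {
			struct probe *op = *pending;

			*pending = op->next;	/* unlink from pending list */
			op->next = *done;	/* push onto done list */
			*done = op;
			c++;
		}
		/* The kernel patches all 'c' sites here in one stop_machine(). */
		return c;
	}

	int main(void)
	{
		struct probe *pending = NULL, *done = NULL;
		int i, passes = 0;

		/* Build 600 fake probes so more than two batches are needed. */
		for (i = 0; i < 600; i++) {
			struct probe *p = malloc(sizeof(*p));

			p->id = i;
			p->next = pending;
			pending = p;
		}

		/* Each iteration models one run of the optimizer work item. */
		while (pending) {
			int c = unoptimize_batch(&pending, &done);

			passes++;
			printf("pass %d: unoptimized %d probes\n", passes, c);
		}

		/* Free everything (the kernel's do_free_cleaned_kprobes step). */
		while (done) {
			struct probe *p = done;

			done = p->next;
			free(p);
		}
		return 0;
	}

In the real kernel path, the outer while loop corresponds to
kprobe_optimizer() re-kicking itself via kick_kprobe_optimizer()
whenever unoptimizing_list is still non-empty after a pass, which is
exactly what the Step 5 hunk in kernel/kprobes.c above adds.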