author     NeilBrown <neilb@suse.com>    2015-09-05 11:07:04 +0200
committer  NeilBrown <neilb@suse.com>    2015-09-05 11:08:32 +0200
commit     e89c6fdf9e0eb1b5a03574d4ca73e83eae8deb91 (patch)
tree       f9df292ed03a5a3c4ddc658ae3646f02a1ffafce /arch/x86/kernel/hw_breakpoint.c
parent     c3cce6cda162eb2b2960a85d9c8992f4f3be85d0 (diff)
parent     1081230b748de8f03f37f80c53dfa89feda9b8de (diff)
Merge linux-block/for-4.3/core into md/for-linux

There were a few conflicts that were fairly easy to resolve.

Signed-off-by: NeilBrown <neilb@suse.com>
Diffstat (limited to 'arch/x86/kernel/hw_breakpoint.c')
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c  31
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 7114ba2..50a3fad 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -32,6 +32,7 @@
 #include <linux/irqflags.h>
 #include <linux/notifier.h>
 #include <linux/kallsyms.h>
+#include <linux/kprobes.h>
 #include <linux/percpu.h>
 #include <linux/kdebug.h>
 #include <linux/kernel.h>
@@ -179,7 +180,11 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
 	va = info->address;
 	len = bp->attr.bp_len;
 
-	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+	/*
+	 * We don't need to worry about va + len - 1 overflowing:
+	 * we already require that va is aligned to a multiple of len.
+	 */
+	return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
 }
 
 int arch_bp_generic_fields(int x86_len, int x86_type,
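
The hunk above classifies a breakpoint as kernel-space if any byte of [va, va + len - 1] reaches the user/kernel boundary; since va is required to be aligned to len (a power of two), the addition cannot wrap. The following is a minimal stand-alone sketch of that predicate, not kernel code: BOUNDARY and bp_in_kernelspace() are made-up names standing in for TASK_SIZE_MAX and the kernel helper.

/* check_bp_range.c -- stand-alone sketch, not kernel code */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for TASK_SIZE_MAX */
#define BOUNDARY 0x00007ffffffff000ULL

static bool bp_in_kernelspace(uint64_t va, uint64_t len)
{
	/*
	 * len is a small power of two and va is aligned to len,
	 * so va + len - 1 cannot wrap around the address space.
	 */
	return (va >= BOUNDARY) || ((va + len - 1) >= BOUNDARY);
}

int main(void)
{
	/* starts at the boundary -> treated as kernel space (prints 1) */
	printf("%d\n", bp_in_kernelspace(BOUNDARY, 8));
	/* last byte is BOUNDARY - 1 -> still user space (prints 0) */
	printf("%d\n", bp_in_kernelspace(BOUNDARY - 8, 8));
	return 0;
}
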
@@ -243,6 +248,20 @@ static int arch_build_bp_info(struct perf_event *bp)
 		info->type = X86_BREAKPOINT_RW;
 		break;
 	case HW_BREAKPOINT_X:
+		/*
+		 * We don't allow kernel breakpoints in places that are not
+		 * acceptable for kprobes. On non-kprobes kernels, we don't
+		 * allow kernel breakpoints at all.
+		 */
+		if (bp->attr.bp_addr >= TASK_SIZE_MAX) {
+#ifdef CONFIG_KPROBES
+			if (within_kprobe_blacklist(bp->attr.bp_addr))
+				return -EINVAL;
+#else
+			return -EINVAL;
+#endif
+		}
+
 		info->type = X86_BREAKPOINT_EXECUTE;
 		/*
 		 * x86 inst breakpoints need to have a specific undefined len.
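
This hunk only affects execute breakpoints whose address lies in kernel space: they are now refused unless kprobes could also probe that address, and refused outright on kernels built without CONFIG_KPROBES. From user space such a breakpoint is requested through perf_event_open(); the sketch below shows what that request looks like. It is illustrative only -- the kernel address is hypothetical and the call needs sufficient privileges.

/* bpx_request.c -- illustrative sketch; the breakpoint address is hypothetical */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

int main(void)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.bp_type = HW_BREAKPOINT_X;
	attr.bp_len = sizeof(long);           /* execute breakpoints use this length */
	attr.bp_addr = 0xffffffff81000000ULL; /* hypothetical kernel text address */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open"); /* EINVAL if the address is kprobe-blacklisted */
	else
		close(fd);
	return 0;
}
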
@@ -276,8 +295,18 @@ static int arch_build_bp_info(struct perf_event *bp)
 		break;
 #endif
 	default:
+		/* AMD range breakpoint */
 		if (!is_power_of_2(bp->attr.bp_len))
 			return -EINVAL;
+		if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
+			return -EINVAL;
+		/*
+		 * It's impossible to use a range breakpoint to fake out
+		 * user vs kernel detection because bp_len - 1 can't
+		 * have the high bit set. If we ever allow range instruction
+		 * breakpoints, then we'll have to check for kprobe-blacklisted
+		 * addresses anywhere in the range.
+		 */
 		if (!cpu_has_bpext)
 			return -EOPNOTSUPP;
 		info->mask = bp->attr.bp_len - 1;
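
In the range-breakpoint case, info->mask = bp_len - 1 tells the AMD address-mask hardware to ignore the low bits when comparing addresses, so with bp_addr aligned to bp_len the breakpoint covers [bp_addr, bp_addr + bp_len - 1]; the added alignment check is what guarantees that property. Below is a small stand-alone sketch of that mask arithmetic; the bp_addr and bp_len values are hypothetical.

/* range_bp_mask.c -- stand-alone sketch of the mask arithmetic, not kernel code */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bp_addr = 0x00007f1234560000ULL; /* hypothetical, aligned to bp_len */
	uint64_t bp_len  = 64;                    /* power of two */
	uint64_t mask;

	if (bp_len & (bp_len - 1)) {
		fprintf(stderr, "bp_len must be a power of two\n");
		return 1;
	}
	if (bp_addr & (bp_len - 1)) {
		/* this is the case the added check above rejects with -EINVAL */
		fprintf(stderr, "bp_addr must be aligned to bp_len\n");
		return 1;
	}

	mask = bp_len - 1;
	/* with an aligned bp_addr, the breakpoint matches [bp_addr, bp_addr + mask] */
	printf("range: 0x%llx .. 0x%llx\n",
	       (unsigned long long)bp_addr,
	       (unsigned long long)(bp_addr + mask));
	return 0;
}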