author		Mark Rutland <mark.rutland@arm.com>	2018-06-14 15:27:37 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-06-15 07:55:24 +0900
commit		dc55daff9040a90adce97208e776ee0bf515ab12 (patch)
tree		28b562c0d2a06c901e481d217b85cdf1a24723a6 /kernel
parent		c9484b986ef03492357fddd50afbdd02929cfa72 (diff)
kcov: prefault the kcov_area
On many architectures the vmalloc area is lazily faulted in upon first access. This is problematic for KCOV, as __sanitizer_cov_trace_pc() accesses the (vmalloc'd) kcov_area, and fault handling code may be instrumented. If an access to kcov_area faults, this will result in mutual recursion through the fault handling code and __sanitizer_cov_trace_pc(), eventually leading to stack corruption and/or overflow.

We can avoid this by faulting in the kcov_area before __sanitizer_cov_trace_pc() is permitted to access it. Once it has been faulted in, it will remain present in the process page tables, and will not fault again.

[akpm@linux-foundation.org: code cleanup]
[akpm@linux-foundation.org: add comment explaining kcov_fault_in_area()]
[akpm@linux-foundation.org: fancier code comment from Mark]
Link: http://lkml.kernel.org/r/20180504135535.53744-3-mark.rutland@arm.com
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
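For context, the prefault lands in the KCOV_ENABLE ioctl path, which userspace reaches with the open/init/mmap/enable sequence documented in Documentation/dev-tools/kcov.rst. Below is a minimal sketch of that sequence; the ioctl numbers match include/uapi/linux/kcov.h, while COVER_SIZE is an arbitrary illustrative value and error handling is trimmed to perror():

/*
 * Minimal KCOV client, per Documentation/dev-tools/kcov.rst. The
 * KCOV_ENABLE ioctl below is the point where, with this patch,
 * kcov_fault_in_area() walks the vmalloc'd area before instrumented
 * code is allowed to touch it.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
#define KCOV_ENABLE	_IO('c', 100)
#define KCOV_DISABLE	_IO('c', 101)
#define KCOV_TRACE_PC	0
#define COVER_SIZE	(64 << 10)	/* area size, in unsigned longs */

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1) { perror("open"); return 1; }

	/* Size the coverage area (counted in unsigned longs). */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE)) { perror("init"); return 1; }

	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED) { perror("mmap"); return 1; }

	/* Reaches kcov_ioctl_locked(), which now prefaults kcov_area. */
	if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC)) { perror("enable"); return 1; }

	/* ... syscalls made here are traced; cover[0] holds the PC count ... */

	ioctl(fd, KCOV_DISABLE, 0);
	munmap(cover, COVER_SIZE * sizeof(unsigned long));
	close(fd);
	return 0;
}

The ordering matters: the area is faulted in during KCOV_ENABLE, before t->kcov_area is published to the task, so later calls to __sanitizer_cov_trace_pc() never take a vmalloc fault on it.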
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/kcov.c	16
1 file changed, 16 insertions, 0 deletions
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 5be9a60..cf25039 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -324,6 +324,21 @@ static int kcov_close(struct inode *inode, struct file *filep)
return 0;
}
+/*
+ * Fault in a lazily-faulted vmalloc area before it can be used by
+ * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
+ * vmalloc fault handling path is instrumented.
+ */
+static void kcov_fault_in_area(struct kcov *kcov)
+{
+ unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
+ unsigned long *area = kcov->area;
+ unsigned long offset;
+
+ for (offset = 0; offset < kcov->size; offset += stride)
+ READ_ONCE(area[offset]);
+}
+
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
unsigned long arg)
{
@@ -372,6 +387,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
#endif
else
return -EINVAL;
+ kcov_fault_in_area(kcov);
/* Cache in task struct for performance. */
t->kcov_size = kcov->size;
t->kcov_area = kcov->area;
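A quick worked example of the stride arithmetic in kcov_fault_in_area(): kcov->size counts unsigned longs, so stepping the index by PAGE_SIZE / sizeof(unsigned long) touches each page of the area exactly once, and READ_ONCE() keeps the compiler from discarding the otherwise-unused loads. The concrete numbers below are assumptions for illustration (4 KiB pages, 8-byte longs; both vary by architecture):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned long stride = page_size / sizeof(unsigned long);	/* 4096 / 8 = 512 */
	unsigned long size = 64 << 10;	/* kcov->size: 65536 words = 512 KiB area */

	/* The loop issues one read per page: 65536 / 512 = 128 READ_ONCE()s. */
	printf("stride = %lu words, reads = %lu\n", stride, size / stride);
	return 0;
}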