author    Daniel Borkmann <daniel@iogearbox.net>    2018-06-15 02:30:48 +0200
committer Alexei Starovoitov <ast@kernel.org>       2018-06-15 11:14:25 -0700
commit    9facc336876f7ecf9edba4c67b90426fde4ec898 (patch)
tree      e2a0afa2546dd672460694cd8e08346d6195fa07 /include
parent    7d1982b4e335c1b184406b7566f6041bfe313c35 (diff)
bpf: reject any prog that failed read-only lock
We currently lock any JITed image as read-only via bpf_jit_binary_lock_ro() as well as the BPF image as read-only through bpf_prog_lock_ro(). In case any of these fail, we throw a WARN_ON_ONCE() in order to yell loudly to the log. Perhaps, to some extent, this may be comparable to an allocation where __GFP_NOWARN is explicitly not set.

Added via 65869a47f348 ("bpf: improve read-only handling"), this behavior is slightly different compared to any of the other in-kernel set_memory_ro() users, who do not check the return code of set_memory_ro() and friends /at all/ (e.g. in the case of module_enable_ro() / module_disable_ro()). Given that in BPF this is a mandatory hardening step, we want to know whether there are any issues that would leave the BPF image or the JITed image writable.

It so happens that syzkaller enabled fault injection and triggered a memory allocation failure deep inside x86's change_page_attr_set_clr(), which was invoked from set_memory_ro(). Now, there are two options: i) leave everything as is, or ii) rework the image locking code so that a final checkpoint out of the central bpf_prog_select_runtime() probes whether any of the calls during prog setup were unsuccessful, and bail out with an error in that case. Option ii) is the better approach, since this additional paranoia avoids altogether leaving any potential W+X pages from the BPF side in the system. Therefore, let's be strict about it and reject programs in such unlikely occasions.

While testing, I also noticed that one bpf_prog_lock_ro() call was missing on the outer dummy prog in the case of calls: in the destructor we call bpf_prog_free_deferred() on the main prog, where we try to bpf_prog_unlock_free() the program, and since we go via bpf_prog_select_runtime(), the lock is now taken there as well.

Reported-by: syzbot+3b889862e65a98317058@syzkaller.appspotmail.com
Reported-by: syzbot+9e762b52dd17e616a7a5@syzkaller.appspotmail.com
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
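The kernel/bpf/core.c half of this change lies outside the include-only diff below. As a hedged sketch of the checkpoint described above (the wrapper name bpf_prog_check_pages_ro_locked() and the surrounding bpf_prog_select_runtime() body are assumptions here; only bpf_prog_check_pages_ro_single() comes from this diff), it could look roughly like:

/* Sketch only: this consumer lives in kernel/bpf/core.c and is not part
 * of the include-only diff shown here; names beyond
 * bpf_prog_check_pages_ro_single() are assumptions.
 */
static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	int i, err;

	/* With bpf-to-bpf calls, every subprog image must have been
	 * locked read-only, too.
	 */
	for (i = 0; i < fp->aux->func_cnt; i++) {
		err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
		if (err)
			return err;
	}

	return bpf_prog_check_pages_ro_single(fp);
#else
	return 0;
#endif
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* ... interpreter/JIT selection and bpf_prog_lock_ro() happen
	 * above this point ...
	 */

	/* Final checkpoint: surface any read-only locking failure that
	 * was only latched in the locked bits, instead of leaving W+X
	 * pages behind.
	 */
	*err = bpf_prog_check_pages_ro_locked(fp);

	return fp;
}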
Diffstat (limited to 'include')
-rw-r--r--  include/linux/filter.h | 60 ++++++++++++++++++++++++++++++++++++++----------------------
1 file changed, 38 insertions(+), 22 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 297c56f..108f981 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -469,7 +469,8 @@ struct sock_fprog_kern {
 };
 
 struct bpf_binary_header {
-	unsigned int pages;
+	u16 pages;
+	u16 locked:1;
 	u8 image[];
 };
 
@@ -671,15 +672,18 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
 	fp->locked = 1;
-	WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
+	if (set_memory_ro((unsigned long)fp, fp->pages))
+		fp->locked = 0;
+#endif
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
 	if (fp->locked) {
 		WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
 		/* In case set_memory_rw() fails, we want to be the first
@@ -687,34 +691,30 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 		 */
 		fp->locked = 0;
 	}
+#endif
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
-	WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
-	WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+	hdr->locked = 1;
+	if (set_memory_ro((unsigned long)hdr, hdr->pages))
+		hdr->locked = 0;
+#endif
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+	if (hdr->locked) {
+		WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
+		/* In case set_memory_rw() fails, we want to be the first
+		 * to crash here instead of some random place later on.
+		 */
+		hdr->locked = 0;
+	}
+#endif
 }
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
 
 static inline struct bpf_binary_header *
 bpf_jit_binary_hdr(const struct bpf_prog *fp)
@@ -725,6 +725,22 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp)
 	return (void *)addr;
 }
 
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
+{
+	if (!fp->locked)
+		return -ENOLCK;
+	if (fp->jited) {
+		const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
+
+		if (!hdr->locked)
+			return -ENOLCK;
+	}
+
+	return 0;
+}
+#endif
+
 int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
 static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 {
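As a closing usage note, here is a hedged sketch of how an arch JIT would interact with the reworked helpers; the surrounding function is merely illustrative of a typical JIT epilogue (e.g. x86's bpf_int_jit_compile()) and is not part of this diff:

/* Illustrative fragment, not from this diff: everything except
 * bpf_jit_binary_lock_ro(), bpf_prog_check_pages_ro_single() and the
 * locked bits is an assumption about a typical arch JIT epilogue.
 */
static struct bpf_prog *example_finish_jit(struct bpf_prog *fp,
					   struct bpf_binary_header *header,
					   u8 *image)
{
	/* May now fail silently; a failure is merely latched as
	 * header->locked = 0 rather than WARNed about.
	 */
	bpf_jit_binary_lock_ro(header);

	fp->bpf_func = (void *)image;
	fp->jited = 1;

	/* The latched failure surfaces at the central checkpoint:
	 * bpf_prog_check_pages_ro_single() returns -ENOLCK and the
	 * program is rejected before it can ever be executed.
	 */
	return fp;
}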