author		David S. Miller <davem@davemloft.net>	2018-04-22 21:15:59 -0400
committer	David S. Miller <davem@davemloft.net>	2018-04-22 21:15:59 -0400
commit		986e54cd685e8166bc8b4f4c47de44709e541510 (patch)
tree		f65c405a84425c608db7233819654e70f61d566a
parent		660e309ddd6aa99bb4d2a859c4a0b56965e744ef (diff)
parent		6ab690aa439803347743c0d899ac422774fdd5e7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2018-04-21

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a deadlock between mm->mmap_sem and bpf_event_mutex when
   one task is detaching a BPF prog via perf_event_detach_bpf_prog()
   and another one dumping through bpf_prog_array_copy_info(). For
   the latter we move the copy_to_user() out of the bpf_event_mutex
   lock to fix it, from Yonghong.

2) Fix test_sock and test_sock_addr.sh failures. The former was
   hitting rlimit issues and the latter required ping to specify
   the address family, from Yonghong.

3) Remove a dead check in sockmap's sock_map_alloc(), from Jann.

4) Add generated files to BPF kselftests gitignore that were
   previously missed, from Anders.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
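For readers tracing fix 1): the underlying bug is a classic ABBA lock inversion. One task closing a perf event fd takes mm->mmap_sem before reaching perf_event_detach_bpf_prog(), which wants bpf_event_mutex; another task queries the prog array while holding bpf_event_mutex and then calls copy_to_user(), which can fault and needs mm->mmap_sem. The fix fills a kernel-side buffer under the mutex and touches user memory only after unlocking. A minimal sketch of the resulting pattern (it mirrors the kernel/trace/bpf_trace.c hunk below, with names taken from that hunk):

    u32 *ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);

    mutex_lock(&bpf_event_mutex);
    ret = bpf_prog_array_copy_info(array, ids, ids_len, &prog_cnt);
    mutex_unlock(&bpf_event_mutex);

    /* user memory is only touched outside bpf_event_mutex, so a page
     * fault here can take mm->mmap_sem without inverting lock order */
    if (copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
        ret = -EFAULT;
    kfree(ids);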
-rw-r--r--  include/linux/bpf.h                            |  4
-rw-r--r--  kernel/bpf/core.c                              | 45
-rw-r--r--  kernel/bpf/sockmap.c                           |  3
-rw-r--r--  kernel/trace/bpf_trace.c                       | 25
-rw-r--r--  tools/testing/selftests/bpf/.gitignore         |  3
-rw-r--r--  tools/testing/selftests/bpf/test_sock.c        |  1
-rw-r--r--  tools/testing/selftests/bpf/test_sock_addr.c   |  1
-rwxr-xr-x  tools/testing/selftests/bpf/test_sock_addr.sh  |  4

8 files changed, 59 insertions(+), 27 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 95a7abd..486e65e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -339,8 +339,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
- __u32 __user *prog_ids, u32 request_cnt,
- __u32 __user *prog_cnt);
+ u32 *prog_ids, u32 request_cnt,
+ u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
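This signature change is the interface half of fix 1): dropping the __user annotation means bpf_prog_array_copy_info() now writes program IDs into plain kernel memory, and the caller becomes responsible for copying them to userspace, which it can then do outside bpf_event_mutex. For readers new to the annotation (this is standard kernel convention, not something introduced by this patch):

    /* __user marks pointers into userspace address space; they must not
     * be dereferenced directly, only handed to copy_to_user() and
     * friends. Removing it turns this into a kernel-to-kernel helper: */
    int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
                                 u32 *prog_ids, u32 request_cnt,
                                 u32 *prog_cnt);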
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d315b39..ba03ec3 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
return cnt;
}
+static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
+ u32 *prog_ids,
+ u32 request_cnt)
+{
+ int i = 0;
+
+ for (; *prog; prog++) {
+ if (*prog == &dummy_bpf_prog.prog)
+ continue;
+ prog_ids[i] = (*prog)->aux->id;
+ if (++i == request_cnt) {
+ prog++;
+ break;
+ }
+ }
+
+ return !!(*prog);
+}
+
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
__u32 __user *prog_ids, u32 cnt)
{
struct bpf_prog **prog;
unsigned long err = 0;
- u32 i = 0, *ids;
bool nospc;
+ u32 *ids;
/* users of this function are doing:
* cnt = bpf_prog_array_length();
@@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
return -ENOMEM;
rcu_read_lock();
prog = rcu_dereference(progs)->progs;
- for (; *prog; prog++) {
- if (*prog == &dummy_bpf_prog.prog)
- continue;
- ids[i] = (*prog)->aux->id;
- if (++i == cnt) {
- prog++;
- break;
- }
- }
- nospc = !!(*prog);
+ nospc = bpf_prog_array_copy_core(prog, ids, cnt);
rcu_read_unlock();
err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
kfree(ids);
@@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
}
int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
- __u32 __user *prog_ids, u32 request_cnt,
- __u32 __user *prog_cnt)
+ u32 *prog_ids, u32 request_cnt,
+ u32 *prog_cnt)
{
+ struct bpf_prog **prog;
u32 cnt = 0;
if (array)
cnt = bpf_prog_array_length(array);
- if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
- return -EFAULT;
+ *prog_cnt = cnt;
/* return early if user requested only program count or nothing to copy */
if (!request_cnt || !cnt)
return 0;
- return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
+ /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
+ prog = rcu_dereference_check(array, 1)->progs;
+ return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
+ : 0;
}
static void bpf_prog_free_deferred(struct work_struct *work)
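The core.c changes factor the ID-collection loop out of bpf_prog_array_copy_to_user() into bpf_prog_array_copy_core() so the new kernel-buffer path in bpf_prog_array_copy_info() can share it. The reworked contract: *prog_cnt always receives the full program count, up to request_cnt IDs land in prog_ids, and -ENOSPC signals that the array held more entries than requested. A caller-side sketch (hypothetical caller, kernel pointers only):

    u32 total, ids[4];
    int err;

    err = bpf_prog_array_copy_info(array, ids, ARRAY_SIZE(ids), &total);
    /* err == 0:       all attached program IDs fit in ids[]
     * err == -ENOSPC: ids[] holds the first 4 IDs, and total still
     *                 reports how many programs are attached */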
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 8dd9210..a3b2138 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1442,9 +1442,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
- if (attr->value_size > KMALLOC_MAX_SIZE)
- return ERR_PTR(-E2BIG);
-
err = bpf_tcp_ulp_register();
if (err && err != -EEXIST)
return ERR_PTR(err);
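The sockmap hunk deletes a check that could never fire: a few lines earlier, sock_map_alloc() already rejects any attr->value_size other than exactly 4, so comparing it against KMALLOC_MAX_SIZE afterwards was dead code:

    /* the surviving guard pins value_size to 4, making a later
     * value_size > KMALLOC_MAX_SIZE test unreachable */
    if (attr->max_entries == 0 || attr->key_size != 4 ||
        attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
        return ERR_PTR(-EINVAL);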
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d88e96d..56ba0f2 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -977,6 +977,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
struct perf_event_query_bpf __user *uquery = info;
struct perf_event_query_bpf query = {};
+ u32 *ids, prog_cnt, ids_len;
int ret;
if (!capable(CAP_SYS_ADMIN))
@@ -985,16 +986,32 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
return -EINVAL;
if (copy_from_user(&query, uquery, sizeof(query)))
return -EFAULT;
- if (query.ids_len > BPF_TRACE_MAX_PROGS)
+
+ ids_len = query.ids_len;
+ if (ids_len > BPF_TRACE_MAX_PROGS)
return -E2BIG;
+ ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
+ if (!ids)
+ return -ENOMEM;
+ /*
+ * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
+ * is required when user only wants to check for uquery->prog_cnt.
+ * There is no need to check for it since the case is handled
+ * gracefully in bpf_prog_array_copy_info.
+ */
mutex_lock(&bpf_event_mutex);
ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
- uquery->ids,
- query.ids_len,
- &uquery->prog_cnt);
+ ids,
+ ids_len,
+ &prog_cnt);
mutex_unlock(&bpf_event_mutex);
+ if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
+ copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
+ ret = -EFAULT;
+
+ kfree(ids);
return ret;
}
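From userspace, the code above is reached through the PERF_EVENT_IOC_QUERY_BPF ioctl with a struct perf_event_query_bpf. A hedged sketch of a caller (perf_fd is assumed to be an already-opened perf event fd; error handling abbreviated):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    static void query_attached_progs(int perf_fd)
    {
        __u32 ids_len = 16;
        struct perf_event_query_bpf *query;

        query = calloc(1, sizeof(*query) + ids_len * sizeof(__u32));
        if (!query)
            return;
        query->ids_len = ids_len;

        /* the kernel fills prog_cnt and up to ids_len program IDs;
         * ids_len = 0 is valid and just reports the count; ENOSPC in
         * errno means more programs are attached than ids_len */
        if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
            printf("%u program(s) attached\n", query->prog_cnt);
        free(query);
    }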
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 9cf83f8..5e1ab2f 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -12,3 +12,6 @@ test_tcpbpf_user
test_verifier_log
feature
test_libbpf_open
+test_sock
+test_sock_addr
+urandom_read
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index 73bb20c..f4d99fa 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
#include <bpf/bpf.h>
#include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
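test_sock.c (and test_sock_addr.c below) gain an include of the selftests' bpf_rlimit.h so the binaries stop tripping over the default RLIMIT_MEMLOCK when loading programs and maps. The header's trick, roughly (an illustrative sketch of the idea, not a verbatim copy of tools/testing/selftests/bpf/bpf_rlimit.h):

    #include <sys/resource.h>

    /* constructor runs before main(), lifting the per-process memlock
     * limit so BPF allocations don't fail under the default rlimit */
    static __attribute__((constructor)) void raise_memlock_rlimit(void)
    {
        struct rlimit rlim = {
            .rlim_cur = RLIM_INFINITY,
            .rlim_max = RLIM_INFINITY,
        };

        setrlimit(RLIMIT_MEMLOCK, &rlim);
    }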
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index d488f20..2950f80 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -15,6 +15,7 @@
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
#define CG_PATH "/foo"
#define CONNECT4_PROG_PATH "./connect4_prog.o"
diff --git a/tools/testing/selftests/bpf/test_sock_addr.sh b/tools/testing/selftests/bpf/test_sock_addr.sh
index c6e1dcf..9832a87 100755
--- a/tools/testing/selftests/bpf/test_sock_addr.sh
+++ b/tools/testing/selftests/bpf/test_sock_addr.sh
@@ -4,7 +4,7 @@ set -eu
ping_once()
{
- ping -q -c 1 -W 1 ${1%%/*} >/dev/null 2>&1
+ ping -${1} -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1
}
wait_for_ip()
@@ -13,7 +13,7 @@ wait_for_ip()
echo -n "Wait for testing IPv4/IPv6 to become available "
for _i in $(seq ${MAX_PING_TRIES}); do
echo -n "."
- if ping_once ${TEST_IPv4} && ping_once ${TEST_IPv6}; then
+ if ping_once 4 ${TEST_IPv4} && ping_once 6 ${TEST_IPv6}; then
echo " OK"
return
fi