author    Ingo Molnar <mingo@kernel.org>  2018-02-21 08:48:35 +0100
committer Ingo Molnar <mingo@kernel.org>  2018-02-21 08:48:35 +0100
commit    ed029343951f9f0c8600a484841c67e6c129717b (patch)
tree      c66694a3f7a5c1f222e76f8f895b5fcc47ffe1f9 /kernel
parent    387f77cc8249c847b4fa4d8c93694818b79efee3 (diff)
parent    91ab883eb21325ad80f3473633f794c78ac87f51 (diff)
Merge tag 'v4.16-rc2' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/sockmap.c                | 187
-rw-r--r--  kernel/configs/kvm_guest.config     |   1
-rw-r--r--  kernel/events/core.c                |   2
-rw-r--r--  kernel/events/ring_buffer.c         |   2
-rw-r--r--  kernel/irq/irqdomain.c              |  18
-rw-r--r--  kernel/kprobes.c                    | 178
-rw-r--r--  kernel/locking/qspinlock.c          |  21
-rw-r--r--  kernel/printk/printk.c              |   6
-rw-r--r--  kernel/relay.c                      |   4
-rw-r--r--  kernel/sched/core.c                 |  27
-rw-r--r--  kernel/sched/cpufreq_schedutil.c    |   2
-rw-r--r--  kernel/sched/deadline.c             |   6
-rw-r--r--  kernel/sched/rt.c                   |   3
-rw-r--r--  kernel/time/posix-clock.c           |   2
-rw-r--r--  kernel/trace/ftrace.c               |   1
-rw-r--r--  kernel/trace/ring_buffer.c          |   4
-rw-r--r--  kernel/trace/trace.c                |   4
-rw-r--r--  kernel/trace/trace_events_filter.c  |   9
18 files changed, 301 insertions, 176 deletions
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 0314d17..48c3341 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -86,9 +86,10 @@ struct smap_psock {
struct work_struct tx_work;
struct work_struct gc_work;
+ struct proto *sk_proto;
+ void (*save_close)(struct sock *sk, long timeout);
void (*save_data_ready)(struct sock *sk);
void (*save_write_space)(struct sock *sk);
- void (*save_state_change)(struct sock *sk);
};
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
@@ -96,12 +97,102 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
return rcu_dereference_sk_user_data(sk);
}
+static struct proto tcp_bpf_proto;
+static int bpf_tcp_init(struct sock *sk)
+{
+ struct smap_psock *psock;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ if (unlikely(psock->sk_proto)) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+
+ psock->save_close = sk->sk_prot->close;
+ psock->sk_proto = sk->sk_prot;
+ sk->sk_prot = &tcp_bpf_proto;
+ rcu_read_unlock();
+ return 0;
+}
+
+static void bpf_tcp_release(struct sock *sk)
+{
+ struct smap_psock *psock;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+
+ if (likely(psock)) {
+ sk->sk_prot = psock->sk_proto;
+ psock->sk_proto = NULL;
+ }
+ rcu_read_unlock();
+}
+
+static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
+
+static void bpf_tcp_close(struct sock *sk, long timeout)
+{
+ void (*close_fun)(struct sock *sk, long timeout);
+ struct smap_psock_map_entry *e, *tmp;
+ struct smap_psock *psock;
+ struct sock *osk;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ return sk->sk_prot->close(sk, timeout);
+ }
+
+ /* The psock may be destroyed anytime after exiting the RCU critical
+ * section so by the time we use close_fun the psock may no longer
+ * be valid. However, bpf_tcp_close is called with the sock lock
+ * held so the close hook and sk are still valid.
+ */
+ close_fun = psock->save_close;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+ osk = cmpxchg(e->entry, sk, NULL);
+ if (osk == sk) {
+ list_del(&e->list);
+ smap_release_sock(psock, sk);
+ }
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ rcu_read_unlock();
+ close_fun(sk, timeout);
+}
+
enum __sk_action {
__SK_DROP = 0,
__SK_PASS,
__SK_REDIRECT,
};
+static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
+ .name = "bpf_tcp",
+ .uid = TCP_ULP_BPF,
+ .user_visible = false,
+ .owner = NULL,
+ .init = bpf_tcp_init,
+ .release = bpf_tcp_release,
+};
+
+static int bpf_tcp_ulp_register(void)
+{
+ tcp_bpf_proto = tcp_prot;
+ tcp_bpf_proto.close = bpf_tcp_close;
+ return tcp_register_ulp(&bpf_tcp_ulp_ops);
+}
+
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
@@ -166,68 +257,6 @@ static void smap_report_sk_error(struct smap_psock *psock, int err)
sk->sk_error_report(sk);
}
-static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-
-/* Called with lock_sock(sk) held */
-static void smap_state_change(struct sock *sk)
-{
- struct smap_psock_map_entry *e, *tmp;
- struct smap_psock *psock;
- struct socket_wq *wq;
- struct sock *osk;
-
- rcu_read_lock();
-
- /* Allowing transitions into an established syn_recv states allows
- * for early binding sockets to a smap object before the connection
- * is established.
- */
- switch (sk->sk_state) {
- case TCP_SYN_SENT:
- case TCP_SYN_RECV:
- case TCP_ESTABLISHED:
- break;
- case TCP_CLOSE_WAIT:
- case TCP_CLOSING:
- case TCP_LAST_ACK:
- case TCP_FIN_WAIT1:
- case TCP_FIN_WAIT2:
- case TCP_LISTEN:
- break;
- case TCP_CLOSE:
- /* Only release if the map entry is in fact the sock in
- * question. There is a case where the operator deletes
- * the sock from the map, but the TCP sock is closed before
- * the psock is detached. Use cmpxchg to verify correct
- * sock is removed.
- */
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- break;
- write_lock_bh(&sk->sk_callback_lock);
- list_for_each_entry_safe(e, tmp, &psock->maps, list) {
- osk = cmpxchg(e->entry, sk, NULL);
- if (osk == sk) {
- list_del(&e->list);
- smap_release_sock(psock, sk);
- }
- }
- write_unlock_bh(&sk->sk_callback_lock);
- break;
- default:
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- break;
- smap_report_sk_error(psock, EPIPE);
- break;
- }
-
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_all(&wq->wait);
- rcu_read_unlock();
-}
-
static void smap_read_sock_strparser(struct strparser *strp,
struct sk_buff *skb)
{
@@ -322,10 +351,8 @@ static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
return;
sk->sk_data_ready = psock->save_data_ready;
sk->sk_write_space = psock->save_write_space;
- sk->sk_state_change = psock->save_state_change;
psock->save_data_ready = NULL;
psock->save_write_space = NULL;
- psock->save_state_change = NULL;
strp_stop(&psock->strp);
psock->strp_enabled = false;
}
@@ -350,6 +377,7 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
if (psock->refcnt)
return;
+ tcp_cleanup_ulp(sock);
smap_stop_sock(psock, sock);
clear_bit(SMAP_TX_RUNNING, &psock->state);
rcu_assign_sk_user_data(sock, NULL);
@@ -427,10 +455,8 @@ static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
return;
psock->save_data_ready = sk->sk_data_ready;
psock->save_write_space = sk->sk_write_space;
- psock->save_state_change = sk->sk_state_change;
sk->sk_data_ready = smap_data_ready;
sk->sk_write_space = smap_write_space;
- sk->sk_state_change = smap_state_change;
psock->strp_enabled = true;
}
@@ -509,6 +535,10 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
if (attr->value_size > KMALLOC_MAX_SIZE)
return ERR_PTR(-E2BIG);
+ err = bpf_tcp_ulp_register();
+ if (err && err != -EEXIST)
+ return ERR_PTR(err);
+
stab = kzalloc(sizeof(*stab), GFP_USER);
if (!stab)
return ERR_PTR(-ENOMEM);
@@ -590,11 +620,6 @@ static void sock_map_free(struct bpf_map *map)
}
rcu_read_unlock();
- if (stab->bpf_verdict)
- bpf_prog_put(stab->bpf_verdict);
- if (stab->bpf_parse)
- bpf_prog_put(stab->bpf_parse);
-
sock_map_remove_complete(stab);
}
@@ -754,6 +779,10 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
goto out_progs;
}
+ err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
+ if (err)
+ goto out_progs;
+
set_bit(SMAP_TX_RUNNING, &psock->state);
}
@@ -866,6 +895,19 @@ static int sock_map_update_elem(struct bpf_map *map,
return err;
}
+static void sock_map_release(struct bpf_map *map, struct file *map_file)
+{
+ struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ struct bpf_prog *orig;
+
+ orig = xchg(&stab->bpf_parse, NULL);
+ if (orig)
+ bpf_prog_put(orig);
+ orig = xchg(&stab->bpf_verdict, NULL);
+ if (orig)
+ bpf_prog_put(orig);
+}
+
const struct bpf_map_ops sock_map_ops = {
.map_alloc = sock_map_alloc,
.map_free = sock_map_free,
@@ -873,6 +915,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_get_next_key = sock_map_get_next_key,
.map_update_elem = sock_map_update_elem,
.map_delete_elem = sock_map_delete_elem,
+ .map_release = sock_map_release,
};
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
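The sockmap changes above drop the sk_state_change hook in favour of a TCP ULP: bpf_tcp_init() saves the socket's original struct proto, installs tcp_bpf_proto whose close is bpf_tcp_close(), and bpf_tcp_release() restores the saved ops. The userspace C sketch below illustrates only that save/override/restore pattern; the struct and function names are invented for illustration and are not the kernel API.

#include <stdio.h>

struct conn;                            /* forward declaration */

/* Hypothetical stand-in for an ops table such as struct proto. */
struct conn_ops {
        void (*close)(struct conn *c);
};

struct conn {
        const struct conn_ops *ops;     /* analogous to sk->sk_prot */
        /* saved state, analogous to psock->sk_proto / psock->save_close */
        const struct conn_ops *saved_ops;
        void (*saved_close)(struct conn *c);
};

static void base_close(struct conn *c)
{
        printf("base close\n");
}

static const struct conn_ops base_ops = { .close = base_close };

/* Overriding close(): do extra cleanup, then call the saved callback,
 * mirroring how bpf_tcp_close() removes map entries before invoking
 * psock->save_close. */
static void wrapped_close(struct conn *c)
{
        printf("extra cleanup before the original close\n");
        c->saved_close(c);
}

static const struct conn_ops wrapped_ops = { .close = wrapped_close };

/* Like bpf_tcp_init(): remember the original ops, then swap in ours. */
static int conn_hook(struct conn *c)
{
        if (c->saved_ops)
                return -1;              /* already hooked, cf. -EBUSY */
        c->saved_ops = c->ops;
        c->saved_close = c->ops->close;
        c->ops = &wrapped_ops;
        return 0;
}

/* Like bpf_tcp_release(): restore whatever was there before. */
static void conn_unhook(struct conn *c)
{
        if (c->saved_ops) {
                c->ops = c->saved_ops;
                c->saved_ops = NULL;
        }
}

int main(void)
{
        struct conn c = { .ops = &base_ops };

        conn_hook(&c);
        c.ops->close(&c);               /* cleanup + base close */
        conn_unhook(&c);
        c.ops->close(&c);               /* base close only */
        return 0;
}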
diff --git a/kernel/configs/kvm_guest.config b/kernel/configs/kvm_guest.config
index 8d96437..108fecc 100644
--- a/kernel/configs/kvm_guest.config
+++ b/kernel/configs/kvm_guest.config
@@ -18,6 +18,7 @@ CONFIG_VIRTUALIZATION=y
CONFIG_HYPERVISOR_GUEST=y
CONFIG_PARAVIRT=y
CONFIG_KVM_GUEST=y
+CONFIG_S390_GUEST=y
CONFIG_VIRTIO=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BLK=y
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f0549e7..96db9ae 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4524,7 +4524,7 @@ static __poll_t perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
struct ring_buffer *rb;
- __poll_t events = POLLHUP;
+ __poll_t events = EPOLLHUP;
poll_wait(file, &event->waitq, wait);
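Several hunks in this merge (events, printk, relay, posix-clock, tracing) switch poll-mask constants from POLL* to EPOLL* to match the __poll_t return type used for sparse checking. On Linux the common bits carry the same numeric values, so userspace behaviour is unchanged; a small check program, not part of the patch:

#define _GNU_SOURCE
#include <poll.h>
#include <sys/epoll.h>
#include <stdio.h>

/* Print the classic poll(2) bits next to the EPOLL* bits: for the
 * common events they are identical, so the kernel-side rename is a
 * type-annotation cleanup rather than an ABI change. */
int main(void)
{
        printf("POLLIN=%#x     EPOLLIN=%#x\n",  (unsigned)POLLIN,  (unsigned)EPOLLIN);
        printf("POLLERR=%#x    EPOLLERR=%#x\n", (unsigned)POLLERR, (unsigned)EPOLLERR);
        printf("POLLHUP=%#x    EPOLLHUP=%#x\n", (unsigned)POLLHUP, (unsigned)EPOLLHUP);
        printf("POLLRDNORM=%#x EPOLLRDNORM=%#x\n",
               (unsigned)POLLRDNORM, (unsigned)EPOLLRDNORM);
        return 0;
}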
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 141aa2c..6c6b3c4 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -19,7 +19,7 @@
static void perf_output_wakeup(struct perf_output_handle *handle)
{
- atomic_set(&handle->rb->poll, POLLIN);
+ atomic_set(&handle->rb->poll, EPOLLIN);
handle->event->pending_wakeup = 1;
irq_work_queue(&handle->event->pending);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index e6a9c36..82b8b18 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1726,25 +1726,14 @@ static int irq_domain_debug_show(struct seq_file *m, void *p)
irq_domain_debug_show_one(m, d, 0);
return 0;
}
-
-static int irq_domain_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, irq_domain_debug_show, inode->i_private);
-}
-
-static const struct file_operations dfs_domain_ops = {
- .open = irq_domain_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
static void debugfs_add_domain_dir(struct irq_domain *d)
{
if (!d->name || !domain_dir || d->debugfs_file)
return;
d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
- &dfs_domain_ops);
+ &irq_domain_debug_fops);
}
static void debugfs_remove_domain_dir(struct irq_domain *d)
@@ -1760,7 +1749,8 @@ void __init irq_domain_debugfs_init(struct dentry *root)
if (!domain_dir)
return;
- debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops);
+ debugfs_create_file("default", 0444, domain_dir, NULL,
+ &irq_domain_debug_fops);
mutex_lock(&irq_domain_mutex);
list_for_each_entry(d, &irq_domain_list, link)
debugfs_add_domain_dir(d);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index da2ccf1..102160f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -978,67 +978,90 @@ static int prepare_kprobe(struct kprobe *p)
}
/* Caller must lock kprobe_mutex */
-static void arm_kprobe_ftrace(struct kprobe *p)
+static int arm_kprobe_ftrace(struct kprobe *p)
{
- int ret;
+ int ret = 0;
ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
(unsigned long)p->addr, 0, 0);
- WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
- kprobe_ftrace_enabled++;
- if (kprobe_ftrace_enabled == 1) {
+ if (ret) {
+ pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+ return ret;
+ }
+
+ if (kprobe_ftrace_enabled == 0) {
ret = register_ftrace_function(&kprobe_ftrace_ops);
- WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+ if (ret) {
+ pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
+ goto err_ftrace;
+ }
}
+
+ kprobe_ftrace_enabled++;
+ return ret;
+
+err_ftrace:
+ /*
+ * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
+ * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
+ * empty filter_hash which would undesirably trace all functions.
+ */
+ ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
+ return ret;
}
/* Caller must lock kprobe_mutex */
-static void disarm_kprobe_ftrace(struct kprobe *p)
+static int disarm_kprobe_ftrace(struct kprobe *p)
{
- int ret;
+ int ret = 0;
- kprobe_ftrace_enabled--;
- if (kprobe_ftrace_enabled == 0) {
+ if (kprobe_ftrace_enabled == 1) {
ret = unregister_ftrace_function(&kprobe_ftrace_ops);
- WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+ if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
+ return ret;
}
+
+ kprobe_ftrace_enabled--;
+
ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
(unsigned long)p->addr, 1, 0);
WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+ return ret;
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p) arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p) do {} while (0)
-#define disarm_kprobe_ftrace(p) do {} while (0)
+#define arm_kprobe_ftrace(p) (-ENODEV)
+#define disarm_kprobe_ftrace(p) (-ENODEV)
#endif
/* Arm a kprobe with text_mutex */
-static void arm_kprobe(struct kprobe *kp)
+static int arm_kprobe(struct kprobe *kp)
{
- if (unlikely(kprobe_ftrace(kp))) {
- arm_kprobe_ftrace(kp);
- return;
- }
+ if (unlikely(kprobe_ftrace(kp)))
+ return arm_kprobe_ftrace(kp);
+
cpus_read_lock();
mutex_lock(&text_mutex);
__arm_kprobe(kp);
mutex_unlock(&text_mutex);
cpus_read_unlock();
+
+ return 0;
}
/* Disarm a kprobe with text_mutex */
-static void disarm_kprobe(struct kprobe *kp, bool reopt)
+static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
- if (unlikely(kprobe_ftrace(kp))) {
- disarm_kprobe_ftrace(kp);
- return;
- }
+ if (unlikely(kprobe_ftrace(kp)))
+ return disarm_kprobe_ftrace(kp);
cpus_read_lock();
mutex_lock(&text_mutex);
__disarm_kprobe(kp, reopt);
mutex_unlock(&text_mutex);
cpus_read_unlock();
+
+ return 0;
}
/*
@@ -1362,9 +1385,15 @@ out:
if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
ap->flags &= ~KPROBE_FLAG_DISABLED;
- if (!kprobes_all_disarmed)
+ if (!kprobes_all_disarmed) {
/* Arm the breakpoint again. */
- arm_kprobe(ap);
+ ret = arm_kprobe(ap);
+ if (ret) {
+ ap->flags |= KPROBE_FLAG_DISABLED;
+ list_del_rcu(&p->list);
+ synchronize_sched();
+ }
+ }
}
return ret;
}
@@ -1573,8 +1602,14 @@ int register_kprobe(struct kprobe *p)
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
- if (!kprobes_all_disarmed && !kprobe_disabled(p))
- arm_kprobe(p);
+ if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+ ret = arm_kprobe(p);
+ if (ret) {
+ hlist_del_rcu(&p->hlist);
+ synchronize_sched();
+ goto out;
+ }
+ }
/* Try to optimize kprobe */
try_to_optimize_kprobe(p);
@@ -1608,11 +1643,12 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
struct kprobe *orig_p;
+ int ret;
/* Get an original kprobe for return */
orig_p = __get_valid_kprobe(p);
if (unlikely(orig_p == NULL))
- return NULL;
+ return ERR_PTR(-EINVAL);
if (!kprobe_disabled(p)) {
/* Disable probe if it is a child probe */
@@ -1626,8 +1662,13 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
* should have already been disarmed, so
* skip unneed disarming process.
*/
- if (!kprobes_all_disarmed)
- disarm_kprobe(orig_p, true);
+ if (!kprobes_all_disarmed) {
+ ret = disarm_kprobe(orig_p, true);
+ if (ret) {
+ p->flags &= ~KPROBE_FLAG_DISABLED;
+ return ERR_PTR(ret);
+ }
+ }
orig_p->flags |= KPROBE_FLAG_DISABLED;
}
}
@@ -1644,8 +1685,8 @@ static int __unregister_kprobe_top(struct kprobe *p)
/* Disable kprobe. This will disarm it if needed. */
ap = __disable_kprobe(p);
- if (ap == NULL)
- return -EINVAL;
+ if (IS_ERR(ap))
+ return PTR_ERR(ap);
if (ap == p)
/*
@@ -2078,12 +2119,14 @@ static void kill_kprobe(struct kprobe *p)
int disable_kprobe(struct kprobe *kp)
{
int ret = 0;
+ struct kprobe *p;
mutex_lock(&kprobe_mutex);
/* Disable this kprobe */
- if (__disable_kprobe(kp) == NULL)
- ret = -EINVAL;
+ p = __disable_kprobe(kp);
+ if (IS_ERR(p))
+ ret = PTR_ERR(p);
mutex_unlock(&kprobe_mutex);
return ret;
@@ -2116,7 +2159,9 @@ int enable_kprobe(struct kprobe *kp)
if (!kprobes_all_disarmed && kprobe_disabled(p)) {
p->flags &= ~KPROBE_FLAG_DISABLED;
- arm_kprobe(p);
+ ret = arm_kprobe(p);
+ if (ret)
+ p->flags |= KPROBE_FLAG_DISABLED;
}
out:
mutex_unlock(&kprobe_mutex);
@@ -2407,11 +2452,12 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
.release = seq_release,
};
-static void arm_all_kprobes(void)
+static int arm_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
- unsigned int i;
+ unsigned int i, total = 0, errors = 0;
+ int err, ret = 0;
mutex_lock(&kprobe_mutex);
@@ -2428,46 +2474,74 @@ static void arm_all_kprobes(void)
/* Arming kprobes doesn't optimize kprobe itself */
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, head, hlist)
- if (!kprobe_disabled(p))
- arm_kprobe(p);
+ /* Arm all kprobes on a best-effort basis */
+ hlist_for_each_entry_rcu(p, head, hlist) {
+ if (!kprobe_disabled(p)) {
+ err = arm_kprobe(p);
+ if (err) {
+ errors++;
+ ret = err;
+ }
+ total++;
+ }
+ }
}
- printk(KERN_INFO "Kprobes globally enabled\n");
+ if (errors)
+ pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
+ errors, total);
+ else
+ pr_info("Kprobes globally enabled\n");
already_enabled:
mutex_unlock(&kprobe_mutex);
- return;
+ return ret;
}
-static void disarm_all_kprobes(void)
+static int disarm_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
- unsigned int i;
+ unsigned int i, total = 0, errors = 0;
+ int err, ret = 0;
mutex_lock(&kprobe_mutex);
/* If kprobes are already disarmed, just return */
if (kprobes_all_disarmed) {
mutex_unlock(&kprobe_mutex);
- return;
+ return 0;
}
kprobes_all_disarmed = true;
- printk(KERN_INFO "Kprobes globally disabled\n");
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
+ /* Disarm all kprobes on a best-effort basis */
hlist_for_each_entry_rcu(p, head, hlist) {
- if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
- disarm_kprobe(p, false);
+ if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
+ err = disarm_kprobe(p, false);
+ if (err) {
+ errors++;
+ ret = err;
+ }
+ total++;
+ }
}
}
+
+ if (errors)
+ pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
+ errors, total);
+ else
+ pr_info("Kprobes globally disabled\n");
+
mutex_unlock(&kprobe_mutex);
/* Wait for disarming all kprobes by optimizer */
wait_for_kprobe_optimizer();
+
+ return ret;
}
/*
@@ -2494,6 +2568,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
{
char buf[32];
size_t buf_size;
+ int ret = 0;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
@@ -2504,17 +2579,20 @@ static ssize_t write_enabled_file_bool(struct file *file,
case 'y':
case 'Y':
case '1':
- arm_all_kprobes();
+ ret = arm_all_kprobes();
break;
case 'n':
case 'N':
case '0':
- disarm_all_kprobes();
+ ret = disarm_all_kprobes();
break;
default:
return -EINVAL;
}
+ if (ret)
+ return ret;
+
return count;
}
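With arm/disarm failures now propagated, writing to the kprobes debugfs control file can return an error instead of silently succeeding. A minimal userspace sketch of toggling that file and checking the result (assumes root and a mounted debugfs; the exact errno depends on the kernel):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Toggle all kprobes via debugfs and report errors.  With the change
 * above, a failed arm/disarm makes this write() return -1 with an
 * errno instead of claiming success. */
static int set_kprobes_enabled(int on)
{
        const char *path = "/sys/kernel/debug/kprobes/enabled";
        int fd = open(path, O_WRONLY);
        ssize_t n;

        if (fd < 0) {
                fprintf(stderr, "open %s: %s\n", path, strerror(errno));
                return -1;
        }
        n = write(fd, on ? "1" : "0", 1);
        if (n < 0)
                fprintf(stderr, "write: %s\n", strerror(errno));
        close(fd);
        return n < 0 ? -1 : 0;
}

int main(void)
{
        if (set_kprobes_enabled(0) == 0)
                puts("kprobes globally disabled");
        if (set_kprobes_enabled(1) == 0)
                puts("kprobes globally enabled");
        return 0;
}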
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 38ece03..d880296 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -379,6 +379,14 @@ queue:
tail = encode_tail(smp_processor_id(), idx);
node += idx;
+
+ /*
+ * Ensure that we increment the head node->count before initialising
+ * the actual node. If the compiler is kind enough to reorder these
+ * stores, then an IRQ could overwrite our assignments.
+ */
+ barrier();
+
node->locked = 0;
node->next = NULL;
pv_init_node(node);
@@ -408,14 +416,15 @@ queue:
*/
if (old & _Q_TAIL_MASK) {
prev = decode_tail(old);
+
/*
- * The above xchg_tail() is also a load of @lock which
- * generates, through decode_tail(), a pointer. The address
- * dependency matches the RELEASE of xchg_tail() such that
- * the subsequent access to @prev happens after.
+ * We must ensure that the stores to @node are observed before
+ * the write to prev->next. The address dependency from
+ * xchg_tail is not sufficient to ensure this because the read
+ * component of xchg_tail is unordered with respect to the
+ * initialisation of @node.
*/
-
- WRITE_ONCE(prev->next, node);
+ smp_store_release(&prev->next, node);
pv_wait_node(node, prev);
arch_mcs_spin_lock_contended(&node->locked);
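The qspinlock hunk replaces WRITE_ONCE() with smp_store_release() so the MCS node's initialisation is ordered before the node is linked into prev->next. A rough userspace analogue using C11 release/acquire atomics, modelling only the publication ordering rather than the qspinlock algorithm:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node {
        int locked;
        struct node *next;
};

static struct node node;
static _Atomic(struct node *) published = NULL;

/* Producer: initialise the node, then publish it with release
 * semantics, the analogue of smp_store_release(&prev->next, node). */
static void *producer(void *arg)
{
        (void)arg;
        node.locked = 0;
        node.next = NULL;
        atomic_store_explicit(&published, &node, memory_order_release);
        return NULL;
}

/* Consumer: an acquire load pairs with the release store, so once the
 * pointer is visible, the fields written before it are visible too. */
static void *consumer(void *arg)
{
        struct node *n;

        (void)arg;
        while (!(n = atomic_load_explicit(&published, memory_order_acquire)))
                ;       /* spin until published */
        printf("locked=%d next=%p\n", n->locked, (void *)n->next);
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}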
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index db4b9b8..fc11235 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -930,7 +930,7 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
__poll_t ret = 0;
if (!user)
- return POLLERR|POLLNVAL;
+ return EPOLLERR|EPOLLNVAL;
poll_wait(file, &log_wait, wait);
@@ -938,9 +938,9 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
if (user->seq < log_next_seq) {
/* return error when data has vanished underneath us */
if (user->seq < log_first_seq)
- ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
+ ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
else
- ret = POLLIN|POLLRDNORM;
+ ret = EPOLLIN|EPOLLRDNORM;
}
logbuf_unlock_irq();
diff --git a/kernel/relay.c b/kernel/relay.c
index f7f40a6..c302940 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -924,12 +924,12 @@ static __poll_t relay_file_poll(struct file *filp, poll_table *wait)
struct rchan_buf *buf = filp->private_data;
if (buf->finalized)
- return POLLERR;
+ return EPOLLERR;
if (filp->f_mode & FMODE_READ) {
poll_wait(filp, &buf->read_wait, wait);
if (!relay_buf_empty(buf))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
}
return mask;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bf724c1..e7c535e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2601,19 +2601,31 @@ static inline void finish_task(struct task_struct *prev)
#endif
}
-static inline void finish_lock_switch(struct rq *rq)
+static inline void
+prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
{
+ /*
+ * Since the runqueue lock will be released by the next
+ * task (which is an invalid locking op but in the case
+ * of the scheduler it's an obvious special-case), so we
+ * do an early lockdep release here:
+ */
+ rq_unpin_lock(rq, rf);
+ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
- rq->lock.owner = current;
+ rq->lock.owner = next;
#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
/*
* If we are tracking spinlock dependencies then we have to
* fix up the runqueue lock - which gets 'carried over' from
* prev into current:
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
raw_spin_unlock_irq(&rq->lock);
}
@@ -2844,14 +2856,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
- /*
- * Since the runqueue lock will be released by the next
- * task (which is an invalid locking op but in the case
- * of the scheduler it's an obvious special-case), so we
- * do an early lockdep release here:
- */
- rq_unpin_lock(rq, rf);
- spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
+ prepare_lock_switch(rq, next, rf);
/* Here we just switch the register state and the stack. */
switch_to(prev, next, prev);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index dd062a1..7936f54 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -19,8 +19,6 @@
#include "sched.h"
-#define SUGOV_KTHREAD_PRIORITY 50
-
struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int rate_limit_us;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 9bb0e0c..9df0978 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1153,6 +1153,7 @@ static void update_curr_dl(struct rq *rq)
struct sched_dl_entity *dl_se = &curr->dl;
u64 delta_exec, scaled_delta_exec;
int cpu = cpu_of(rq);
+ u64 now;
if (!dl_task(curr) || !on_dl_rq(dl_se))
return;
@@ -1165,7 +1166,8 @@ static void update_curr_dl(struct rq *rq)
* natural solution, but the full ramifications of this
* approach need further study.
*/
- delta_exec = rq_clock_task(rq) - curr->se.exec_start;
+ now = rq_clock_task(rq);
+ delta_exec = now - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0)) {
if (unlikely(dl_se->dl_yielded))
goto throttle;
@@ -1178,7 +1180,7 @@ static void update_curr_dl(struct rq *rq)
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
- curr->se.exec_start = rq_clock_task(rq);
+ curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
sched_rt_avg_update(rq, delta_exec);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 663b235..aad49451 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -950,12 +950,13 @@ static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_rt_entity *rt_se = &curr->rt;
- u64 now = rq_clock_task(rq);
u64 delta_exec;
+ u64 now;
if (curr->sched_class != &rt_sched_class)
return;
+ now = rq_clock_task(rq);
delta_exec = now - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
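update_curr_dl() and update_curr_rt() now sample rq_clock_task() once into a local now and reuse it for both delta_exec and the new exec_start, keeping the two consistent and avoiding a second clock read. A trivial userspace illustration of the same idiom with clock_gettime(); the names are mine, not the scheduler's:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

struct entity {
        uint64_t exec_start;
        uint64_t sum_exec_runtime;
};

/* Read the clock once and reuse the value, as update_curr_dl/rt now do:
 * delta_exec and the new exec_start are derived from the same sample. */
static void update_curr(struct entity *e)
{
        uint64_t now = now_ns();
        int64_t delta_exec = (int64_t)(now - e->exec_start);

        if (delta_exec <= 0)
                return;

        e->sum_exec_runtime += (uint64_t)delta_exec;
        e->exec_start = now;    /* not a second clock read */
}

int main(void)
{
        struct entity e = { .exec_start = now_ns() };

        update_curr(&e);
        printf("ran for %llu ns\n", (unsigned long long)e.sum_exec_runtime);
        return 0;
}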
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 94ad46d..fe56c4e 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -74,7 +74,7 @@ static __poll_t posix_clock_poll(struct file *fp, poll_table *wait)
__poll_t result = 0;
if (!clk)
- return POLLERR;
+ return EPOLLERR;
if (clk->ops.poll)
result = clk->ops.poll(clk, fp, wait);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index dabd9d1..eac9ce2 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4456,7 +4456,6 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
func_g.type = filter_parse_regex(glob, strlen(glob),
&func_g.search, &not);
func_g.len = strlen(func_g.search);
- func_g.search = glob;
/* we do not support '!' for function probes */
if (WARN_ON(not))
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index ca6930e..dcf1c4d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -627,7 +627,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
* as data is added to any of the @buffer's cpu buffers. Otherwise
* it will wait for data to be added to a specific cpu buffer.
*
- * Returns POLLIN | POLLRDNORM if data exists in the buffers,
+ * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
* zero otherwise.
*/
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
@@ -665,7 +665,7 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
return 0;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5660853..20a2300 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5623,13 +5623,13 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
/* Iterators are static, they should be filled or empty */
if (trace_buffer_iter(iter, iter->cpu_file))
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
if (tr->trace_flags & TRACE_ITER_BLOCK)
/*
* Always select as readable when in blocking mode
*/
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
else
return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
filp, poll_table);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 61e7f06..a764aec 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -400,7 +400,6 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
for (i = 0; i < len; i++) {
if (buff[i] == '*') {
if (!i) {
- *search = buff + 1;
type = MATCH_END_ONLY;
} else if (i == len - 1) {
if (type == MATCH_END_ONLY)
@@ -410,14 +409,14 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
buff[i] = 0;
break;
} else { /* pattern continues, use full glob */
- type = MATCH_GLOB;
- break;
+ return MATCH_GLOB;
}
} else if (strchr("[?\\", buff[i])) {
- type = MATCH_GLOB;
- break;
+ return MATCH_GLOB;
}
}
+ if (buff[0] == '*')
+ *search = buff + 1;
return type;
}
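The ftrace and trace_events_filter hunks stop the caller from clobbering the search pointer and make filter_parse_regex() return MATCH_GLOB patterns untouched, stripping the leading '*' only for the simple END/MIDDLE cases. A simplified, self-contained userspace re-implementation of that classification, an approximation for illustration rather than the kernel function:

#include <stdio.h>
#include <string.h>

enum regex_type {
        MATCH_FULL,
        MATCH_FRONT_ONLY,       /* "foo*"  */
        MATCH_MIDDLE_ONLY,      /* "*foo*" */
        MATCH_END_ONLY,         /* "*foo"  */
        MATCH_GLOB,             /* anything fancier */
};

/* Simplified take on filter_parse_regex() after the fix: glob patterns
 * are returned as-is, and the leading '*' is only stripped (via *search)
 * for the simple END/MIDDLE cases. */
static enum regex_type parse(char *buff, char **search)
{
        enum regex_type type = MATCH_FULL;
        int len = (int)strlen(buff);
        int i;

        *search = buff;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (i == 0) {
                                type = MATCH_END_ONLY;
                        } else if (i == len - 1) {
                                if (type == MATCH_END_ONLY)
                                        type = MATCH_MIDDLE_ONLY;
                                else
                                        type = MATCH_FRONT_ONLY;
                                buff[i] = '\0';
                                break;
                        } else {        /* '*' in the middle: full glob */
                                return MATCH_GLOB;
                        }
                } else if (strchr("[?\\", buff[i])) {
                        return MATCH_GLOB;
                }
        }
        if (buff[0] == '*')
                *search = buff + 1;

        return type;
}

int main(void)
{
        char a[] = "*sched*", b[] = "do_*", c[] = "sys_*_open", *s;

        printf("%d %s\n", parse(a, &s), s);     /* MATCH_MIDDLE_ONLY, "sched" */
        printf("%d %s\n", parse(b, &s), s);     /* MATCH_FRONT_ONLY,  "do_"   */
        printf("%d %s\n", parse(c, &s), s);     /* MATCH_GLOB, pattern intact */
        return 0;
}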