Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit_fsnotify.c                     |  5
-rw-r--r--  kernel/audit_tree.c                         | 12
-rw-r--r--  kernel/audit_watch.c                        |  5
-rw-r--r--  kernel/bpf/inode.c                          | 14
-rw-r--r--  kernel/cgroup/cpuset.c                      |  2
-rw-r--r--  kernel/configs/android-recommended.config   |  2
-rw-r--r--  kernel/configs/tiny.config                  |  4
-rw-r--r--  kernel/fork.c                               | 10
-rw-r--r--  kernel/kcov.c                               | 21
-rw-r--r--  kernel/kexec_core.c                         |  4
-rw-r--r--  kernel/module.c                             |  4
-rw-r--r--  kernel/panic.c                              |  2
-rw-r--r--  kernel/power/main.c                         |  5
-rw-r--r--  kernel/relay.c                              |  2
-rw-r--r--  kernel/sched/core.c                         |  4
-rw-r--r--  kernel/trace/Kconfig                        | 16
16 files changed, 73 insertions(+), 39 deletions(-)
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
index 52f368b..fba7804 100644
--- a/kernel/audit_fsnotify.c
+++ b/kernel/audit_fsnotify.c
@@ -109,7 +109,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
audit_update_mark(audit_mark, dentry->d_inode);
audit_mark->rule = krule;
- ret = fsnotify_add_mark(&audit_mark->mark, inode, NULL, true);
+ ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, true);
if (ret < 0) {
fsnotify_put_mark(&audit_mark->mark);
audit_mark = ERR_PTR(ret);
@@ -165,12 +165,11 @@ static void audit_autoremove_mark_rule(struct audit_fsnotify_mark *audit_mark)
/* Update mark data in audit rules based on fsnotify events. */
static int audit_mark_handle_event(struct fsnotify_group *group,
struct inode *to_tell,
- struct fsnotify_mark *inode_mark,
- struct fsnotify_mark *vfsmount_mark,
u32 mask, const void *data, int data_type,
const unsigned char *dname, u32 cookie,
struct fsnotify_iter_info *iter_info)
{
+ struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
struct audit_fsnotify_mark *audit_mark;
const struct inode *inode = NULL;
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 67e6956..c99ebaa 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -288,8 +288,8 @@ static void untag_chunk(struct node *p)
if (!new)
goto Fallback;
- if (fsnotify_add_mark_locked(&new->mark, entry->connector->inode,
- NULL, 1)) {
+ if (fsnotify_add_inode_mark_locked(&new->mark, entry->connector->inode,
+ 1)) {
fsnotify_put_mark(&new->mark);
goto Fallback;
}
@@ -354,7 +354,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
return -ENOMEM;
entry = &chunk->mark;
- if (fsnotify_add_mark(entry, inode, NULL, 0)) {
+ if (fsnotify_add_inode_mark(entry, inode, 0)) {
fsnotify_put_mark(entry);
return -ENOSPC;
}
@@ -434,8 +434,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
return -ENOENT;
}
- if (fsnotify_add_mark_locked(chunk_entry,
- old_entry->connector->inode, NULL, 1)) {
+ if (fsnotify_add_inode_mark_locked(chunk_entry,
+ old_entry->connector->inode, 1)) {
spin_unlock(&old_entry->lock);
mutex_unlock(&old_entry->group->mark_mutex);
fsnotify_put_mark(chunk_entry);
@@ -989,8 +989,6 @@ static void evict_chunk(struct audit_chunk *chunk)
static int audit_tree_handle_event(struct fsnotify_group *group,
struct inode *to_tell,
- struct fsnotify_mark *inode_mark,
- struct fsnotify_mark *vfsmount_mark,
u32 mask, const void *data, int data_type,
const unsigned char *file_name, u32 cookie,
struct fsnotify_iter_info *iter_info)
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index f1ba889..c17c0c2 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -160,7 +160,7 @@ static struct audit_parent *audit_init_parent(struct path *path)
fsnotify_init_mark(&parent->mark, audit_watch_group);
parent->mark.mask = AUDIT_FS_WATCH;
- ret = fsnotify_add_mark(&parent->mark, inode, NULL, 0);
+ ret = fsnotify_add_inode_mark(&parent->mark, inode, 0);
if (ret < 0) {
audit_free_parent(parent);
return ERR_PTR(ret);
@@ -472,12 +472,11 @@ void audit_remove_watch_rule(struct audit_krule *krule)
/* Update watch data in audit rules based on fsnotify events. */
static int audit_watch_handle_event(struct fsnotify_group *group,
struct inode *to_tell,
- struct fsnotify_mark *inode_mark,
- struct fsnotify_mark *vfsmount_mark,
u32 mask, const void *data, int data_type,
const unsigned char *dname, u32 cookie,
struct fsnotify_iter_info *iter_info)
{
+ struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
const struct inode *inode;
struct audit_parent *parent;
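Note: the three audit conversions above all follow the same fsnotify API change. handle_event() callbacks no longer receive the inode and vfsmount marks as parameters and instead look them up from the iterator, and marks are attached with fsnotify_add_inode_mark() rather than fsnotify_add_mark() with a NULL vfsmount. A minimal sketch of a converted handler follows; the handler itself is hypothetical, only the signature and fsnotify_iter_inode_mark() come from the change above.

#include <linux/fsnotify_backend.h>

/* Hypothetical handler, shown only to illustrate the post-change signature. */
static int example_handle_event(struct fsnotify_group *group,
				struct inode *to_tell,
				u32 mask, const void *data, int data_type,
				const unsigned char *file_name, u32 cookie,
				struct fsnotify_iter_info *iter_info)
{
	/* The mark is now fetched from the iterator, not passed in. */
	struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);

	if (!inode_mark)
		return 0;

	/* ... react to the event on behalf of this mark ... */
	return 0;
}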
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index ed13645..76efe9a 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -295,6 +295,15 @@ static const struct file_operations bpffs_map_fops = {
.release = bpffs_map_release,
};
+static int bpffs_obj_open(struct inode *inode, struct file *file)
+{
+ return -EIO;
+}
+
+static const struct file_operations bpffs_obj_fops = {
+ .open = bpffs_obj_open,
+};
+
static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
const struct inode_operations *iops,
const struct file_operations *fops)
@@ -314,7 +323,8 @@ static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
- return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops, NULL);
+ return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
+ &bpffs_obj_fops);
}
static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
@@ -322,7 +332,7 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
struct bpf_map *map = arg;
return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
- map->btf ? &bpffs_map_fops : NULL);
+ map->btf ? &bpffs_map_fops : &bpffs_obj_fops);
}
static struct dentry *
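Note: the user-visible effect of the two hunks above is that open(2) on a pinned program node, or on a pinned map without BTF, now fails with EIO via bpffs_obj_open(). A small userspace sketch, assuming something has already been pinned at the hypothetical path /sys/fs/bpf/my_prog:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical pin path; create it first with bpftool or bpf(2). */
	int fd = open("/sys/fs/bpf/my_prog", O_RDONLY);

	if (fd < 0 && errno == EIO)
		printf("open failed with EIO, as expected for a pinned object\n");
	else if (fd >= 0)
		close(fd);	/* not expected after this change */
	else
		perror("open");
	return 0;
}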
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index d8b12e0..266f10c 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -605,7 +605,7 @@ static inline int nr_cpusets(void)
* load balancing domains (sched domains) as specified by that partial
* partition.
*
- * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
+ * See "What is sched_load_balance" in Documentation/cgroup-v1/cpusets.txt
* for a background explanation of this.
*
* Does not return errors, on the theory that the callers of this
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index 946fb92..81e9af7 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -12,7 +12,7 @@ CONFIG_BLK_DEV_DM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_STACKPROTECTOR_STRONG=y
CONFIG_COMPACTION=y
CONFIG_CPU_SW_DOMAIN_PAN=y
CONFIG_DM_CRYPT=y
diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config
index 9bfdffc..7fa0c4a 100644
--- a/kernel/configs/tiny.config
+++ b/kernel/configs/tiny.config
@@ -10,7 +10,3 @@ CONFIG_OPTIMIZE_INLINING=y
# CONFIG_SLAB is not set
# CONFIG_SLUB is not set
CONFIG_SLOB=y
-CONFIG_CC_STACKPROTECTOR_NONE=y
-# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
-# CONFIG_CC_STACKPROTECTOR_STRONG is not set
-# CONFIG_CC_STACKPROTECTOR_AUTO is not set
diff --git a/kernel/fork.c b/kernel/fork.c
index 08c6e5e..9440d61 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -440,6 +440,14 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
continue;
}
charge = 0;
+ /*
+ * Don't duplicate many vmas if we've been oom-killed (for
+ * example)
+ */
+ if (fatal_signal_pending(current)) {
+ retval = -EINTR;
+ goto out;
+ }
if (mpnt->vm_flags & VM_ACCOUNT) {
unsigned long len = vma_pages(mpnt);
@@ -811,7 +819,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
clear_tsk_need_resched(tsk);
set_task_stack_end_magic(tsk);
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
tsk->stack_canary = get_random_canary();
#endif
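Note: the dup_mmap() hunk is an instance of a general pattern: a loop that may iterate over a lot of state on behalf of a task that has already received a fatal signal (e.g. the OOM killer's SIGKILL) should stop early rather than finish the work. A minimal sketch of that pattern with a hypothetical work loop; only fatal_signal_pending() and the -EINTR return convention are taken from the change above.

#include <linux/errno.h>
#include <linux/sched/signal.h>

/* Hypothetical long-running loop illustrating the early-bail pattern. */
static int do_long_work(unsigned long nr_steps)
{
	unsigned long i;

	for (i = 0; i < nr_steps; i++) {
		/* Don't keep building state for a task that is being killed. */
		if (fatal_signal_pending(current))
			return -EINTR;

		/* ... one bounded unit of work per iteration ... */
	}
	return 0;
}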
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 2c16f1a..3ebd09e 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -58,7 +58,7 @@ struct kcov {
static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
- enum kcov_mode mode;
+ unsigned int mode;
/*
* We are interested in code coverage as a function of a syscall inputs,
@@ -241,7 +241,8 @@ static void kcov_put(struct kcov *kcov)
void kcov_task_init(struct task_struct *t)
{
- t->kcov_mode = KCOV_MODE_DISABLED;
+ WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
+ barrier();
t->kcov_size = 0;
t->kcov_area = NULL;
t->kcov = NULL;
@@ -323,6 +324,21 @@ static int kcov_close(struct inode *inode, struct file *filep)
return 0;
}
+/*
+ * Fault in a lazily-faulted vmalloc area before it can be used by
+ * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
+ * vmalloc fault handling path is instrumented.
+ */
+static void kcov_fault_in_area(struct kcov *kcov)
+{
+ unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
+ unsigned long *area = kcov->area;
+ unsigned long offset;
+
+ for (offset = 0; offset < kcov->size; offset += stride)
+ READ_ONCE(area[offset]);
+}
+
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
unsigned long arg)
{
@@ -371,6 +387,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
#endif
else
return -EINVAL;
+ kcov_fault_in_area(kcov);
/* Cache in task struct for performance. */
t->kcov_size = kcov->size;
t->kcov_area = kcov->area;
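Note: kcov_fault_in_area() runs in the KCOV_ENABLE path, i.e. after user space has set up and mmap()ed the coverage buffer but before instrumented kernel code starts writing into the vmalloc'ed area. For context, a sketch of the usual userspace KCOV flow along the lines of Documentation/dev-tools/kcov.rst; the buffer size below is arbitrary.

#include <fcntl.h>
#include <linux/kcov.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define COVER_SIZE (64 << 10)	/* entries (unsigned longs), arbitrary */

int main(void)
{
	unsigned long *cover, n, i;
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);

	if (fd < 0 || ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		exit(1);
	/* The kernel vmallocs the area; enabling is when it now gets prefaulted. */
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED || ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
		exit(1);

	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
	read(-1, NULL, 0);			/* the syscall under test */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);

	if (ioctl(fd, KCOV_DISABLE, 0))
		exit(1);
	return 0;
}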
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 20fef1a..23a83a4 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -829,6 +829,8 @@ static int kimage_load_normal_segment(struct kimage *image,
else
buf += mchunk;
mbytes -= mchunk;
+
+ cond_resched();
}
out:
return result;
@@ -893,6 +895,8 @@ static int kimage_load_crash_segment(struct kimage *image,
else
buf += mchunk;
mbytes -= mchunk;
+
+ cond_resched();
}
out:
return result;
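Note: kimage_load_normal_segment() and kimage_load_crash_segment() copy the image page by page, so a cond_resched() at the end of each iteration lets other tasks run and avoids soft-lockup/RCU-stall warnings when loading large images on non-preemptible kernels. A minimal sketch of the placement in a chunked-copy loop (the loop itself is hypothetical):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>

/* Hypothetical chunked copy illustrating where cond_resched() goes. */
static void copy_in_chunks(void *dst, const void *src, unsigned long len)
{
	while (len) {
		unsigned long chunk = min_t(unsigned long, len, PAGE_SIZE);

		memcpy(dst, src, chunk);
		dst += chunk;
		src += chunk;
		len -= chunk;

		/* Give the scheduler a chance between pages. */
		cond_resched();
	}
}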
diff --git a/kernel/module.c b/kernel/module.c
index 68469b3..f475f30 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -274,9 +274,7 @@ static void module_assert_mutex_or_preempt(void)
}
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
-#ifndef CONFIG_MODULE_SIG_FORCE
module_param(sig_enforce, bool_enable_only, 0644);
-#endif /* !CONFIG_MODULE_SIG_FORCE */
/*
* Export sig_enforce kernel cmdline parameter to allow other subsystems rely
@@ -2785,7 +2783,7 @@ static int module_sig_check(struct load_info *info, int flags)
}
/* Not having a signature is only an error if we're strict. */
- if (err == -ENOKEY && !sig_enforce)
+ if (err == -ENOKEY && !is_module_sig_enforced())
err = 0;
return err;
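Note: dropping the #ifndef means sig_enforce is now always exposed as a bool_enable_only parameter (it can be turned on, never off), and module_sig_check() asks a helper instead of reading the variable directly. A sketch of what such an accessor can look like; this is an assumption about is_module_sig_enforced()'s body, not a quote of it.

/* Assumed shape: a plain getter so other subsystems (and module_sig_check())
 * don't reach into the parameter variable directly. */
bool is_module_sig_enforced(void)
{
	return sig_enforce;
}
EXPORT_SYMBOL(is_module_sig_enforced);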
diff --git a/kernel/panic.c b/kernel/panic.c
index 42e4874..8b2e002 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -623,7 +623,7 @@ static __init int register_warn_debugfs(void)
device_initcall(register_warn_debugfs);
#endif
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
/*
* Called when gcc's -fstack-protector feature is used, and
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 705c236..d9706da 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -455,8 +455,9 @@ struct kobject *power_kobj;
* state - control system sleep states.
*
* show() returns available sleep state labels, which may be "mem", "standby",
- * "freeze" and "disk" (hibernation). See Documentation/power/states.txt for a
- * description of what they mean.
+ * "freeze" and "disk" (hibernation).
+ * See Documentation/admin-guide/pm/sleep-states.rst for a description of
+ * what they mean.
*
* store() accepts one of those strings, translates it into the proper
* enumerated value, and initiates a suspend transition.
diff --git a/kernel/relay.c b/kernel/relay.c
index 9f5326e..04f2486 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -39,7 +39,7 @@ static void relay_file_mmap_close(struct vm_area_struct *vma)
/*
* fault() vm_op implementation for relay file mapping.
*/
-static int relay_buf_fault(struct vm_fault *vmf)
+static vm_fault_t relay_buf_fault(struct vm_fault *vmf)
{
struct page *page;
struct rchan_buf *buf = vmf->vma->vm_private_data;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a98d54c..78d8fac 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10,6 +10,8 @@
#include <linux/kthread.h>
#include <linux/nospec.h>
+#include <linux/kcov.h>
+
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -2633,6 +2635,7 @@ static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
+ kcov_prepare_switch(prev);
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
rseq_preempt(prev);
@@ -2702,6 +2705,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
finish_task(prev);
finish_lock_switch(rq);
finish_arch_post_lock_switch();
+ kcov_finish_switch(current);
fire_sched_in_preempt_notifiers(current);
/*
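Note: the two hooks bracket the context switch so that coverage collected while switching tasks is not charged to either the outgoing or the incoming task, which also lines up with check_kcov_mode() above now reading kcov_mode into a plain unsigned int. A hedged sketch of how such hooks can be implemented as flag-setting macros; the KCOV_IN_CTXSW bit is a kcov flag, but the exact macro bodies here are an assumption.

/* Assumed implementation: mark the task as "in context switch" so that
 * check_kcov_mode() refuses to record coverage until the switch is done. */
#define kcov_prepare_switch(t)			\
do {						\
	(t)->kcov_mode |= KCOV_IN_CTXSW;	\
} while (0)

#define kcov_finish_switch(t)			\
do {						\
	(t)->kcov_mode &= ~KCOV_IN_CTXSW;	\
} while (0)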
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index dd6c0a2..dcc0166 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -12,22 +12,22 @@ config NOP_TRACER
config HAVE_FTRACE_NMI_ENTER
bool
help
- See Documentation/trace/ftrace-design.txt
+ See Documentation/trace/ftrace-design.rst
config HAVE_FUNCTION_TRACER
bool
help
- See Documentation/trace/ftrace-design.txt
+ See Documentation/trace/ftrace-design.rst
config HAVE_FUNCTION_GRAPH_TRACER
bool
help
- See Documentation/trace/ftrace-design.txt
+ See Documentation/trace/ftrace-design.rst
config HAVE_DYNAMIC_FTRACE
bool
help
- See Documentation/trace/ftrace-design.txt
+ See Documentation/trace/ftrace-design.rst
config HAVE_DYNAMIC_FTRACE_WITH_REGS
bool
@@ -35,12 +35,12 @@ config HAVE_DYNAMIC_FTRACE_WITH_REGS
config HAVE_FTRACE_MCOUNT_RECORD
bool
help
- See Documentation/trace/ftrace-design.txt
+ See Documentation/trace/ftrace-design.rst
config HAVE_SYSCALL_TRACEPOINTS
bool
help
- See Documentation/trace/ftrace-design.txt
+ See Documentation/trace/ftrace-design.rst
config HAVE_FENTRY
bool
@@ -448,7 +448,7 @@ config KPROBE_EVENTS
help
This allows the user to add tracing events (similar to tracepoints)
on the fly via the ftrace interface. See
- Documentation/trace/kprobetrace.txt for more details.
+ Documentation/trace/kprobetrace.rst for more details.
Those events can be inserted wherever kprobes can probe, and record
various register and memory values.
@@ -575,7 +575,7 @@ config MMIOTRACE
implementation and works via page faults. Tracing is disabled by
default and can be enabled at run-time.
- See Documentation/trace/mmiotrace.txt.
+ See Documentation/trace/mmiotrace.rst.
If you are not helping to develop drivers, say N.
config TRACING_MAP