Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c         3
-rw-r--r--  mm/slab_common.c  8
-rw-r--r--  mm/slub.c         3
3 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 1c02f6e..b9b0df6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4426,7 +4426,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	 * to be a temporary method to find any missing usercopy
 	 * whitelists.
 	 */
-	if (offset <= cachep->object_size &&
+	if (usercopy_fallback &&
+	    offset <= cachep->object_size &&
 	    n <= cachep->object_size - offset) {
 		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
 		return;
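
The slab.c change above only takes effect when the new usercopy_fallback switch is on: a copy that missed the cache's usercopy whitelist but still fits inside the allocated object is warned about and allowed, while with the switch off the code falls through to the hardened-usercopy rejection path (usercopy_abort() in this kernel series, not visible in this hunk). The following is a minimal userspace C model of that decision, written only to illustrate the control flow; the report_*() helpers are hypothetical stand-ins for usercopy_warn()/usercopy_abort(), not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for usercopy_warn()/usercopy_abort(). */
static void report_warn(const char *what, unsigned long offset, unsigned long n)
{
	printf("WARN:  %s copy at offset %lu, length %lu (allowed)\n", what, offset, n);
}

static void report_abort(const char *what, unsigned long offset, unsigned long n)
{
	printf("ABORT: %s copy at offset %lu, length %lu (rejected)\n", what, offset, n);
}

/* Models the tail of __check_heap_object(): the copy missed the whitelisted
 * region, so either warn-and-allow (fallback on, copy inside the object) or
 * reject outright. */
static void check_fallback(bool usercopy_fallback, unsigned long offset,
			   unsigned long n, unsigned long object_size)
{
	if (usercopy_fallback && offset <= object_size && n <= object_size - offset) {
		report_warn("heap object", offset, n);
		return;
	}
	report_abort("heap object", offset, n);
}

int main(void)
{
	check_fallback(true, 16, 32, 128);   /* inside the object, fallback on: warn */
	check_fallback(false, 16, 32, 128);  /* fallback off: reject */
	return 0;
}
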
diff --git a/mm/slab_common.c b/mm/slab_common.c
index fc3e66b..a51f654 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -31,6 +31,14 @@ LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
+#ifdef CONFIG_HARDENED_USERCOPY
+bool usercopy_fallback __ro_after_init =
+		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
+module_param(usercopy_fallback, bool, 0400);
+MODULE_PARM_DESC(usercopy_fallback,
+		"WARN instead of reject usercopy whitelist violations");
+#endif
+
 static LIST_HEAD(slab_caches_to_rcu_destroy);
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
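
Because mm/slab_common.c is always built in, module_param() here defines a boot-time switch rather than a loadable-module option: on the kernel command line it would carry the defining file's prefix (slab_common.usercopy_fallback=0/1 is the expected spelling), its default comes from CONFIG_HARDENED_USERCOPY_FALLBACK, and __ro_after_init plus mode 0400 make it fixed after boot and readable only by root via sysfs. The allocators in mm/slab.c and mm/slub.c still need a declaration of the new symbol; the sketch below shows the expected shape, assuming it lives in a shared header such as include/linux/slab.h, which falls outside this 'mm'-filtered view.

/*
 * Assumed declaration in a shared header (e.g. include/linux/slab.h); the
 * definition with its CONFIG_HARDENED_USERCOPY_FALLBACK default stays in
 * mm/slab_common.c as added by the hunk above.
 */
#ifdef CONFIG_HARDENED_USERCOPY
extern bool usercopy_fallback;
#endif
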
diff --git a/mm/slub.c b/mm/slub.c
index 6d9b1e7..862d835 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3859,7 +3859,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	 * whitelists.
 	 */
 	object_size = slab_ksize(s);
-	if (offset <= object_size && n <= object_size - offset) {
+	if (usercopy_fallback &&
+	    offset <= object_size && n <= object_size - offset) {
 		usercopy_warn("SLUB object", s->name, to_user, offset, n);
 		return;
 	}
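
The slub.c hunk mirrors the slab.c one, so both allocators honor the same switch. At runtime, the 0400 mode should expose the current setting read-only to root, presumably under /sys/module/slab_common/parameters/usercopy_fallback (the slab_common prefix follows from the defining file and, like the exact path, is an assumption here rather than something shown in this diff). A small userspace check might look like this sketch:

#include <stdio.h>

int main(void)
{
	/* Assumed sysfs path for the parameter; requires root because of mode 0400. */
	const char *path = "/sys/module/slab_common/parameters/usercopy_fallback";
	FILE *f = fopen(path, "r");
	int c;

	if (!f) {
		perror(path);
		return 1;
	}
	c = fgetc(f);  /* kernel bool parameters read back as 'Y' or 'N' */
	fclose(f);
	printf("usercopy_fallback: %c\n", c == EOF ? '?' : c);
	return 0;
}
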