-rw-r--r--  include/linux/uaccess.h |  2
-rw-r--r--  mm/slab.c               | 22
-rw-r--r--  mm/slub.c               | 23
-rw-r--r--  mm/usercopy.c           | 21
4 files changed, 58 insertions, 10 deletions
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 38b6442..efe79c1 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -274,6 +274,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #endif
 
 #ifdef CONFIG_HARDENED_USERCOPY
+void usercopy_warn(const char *name, const char *detail, bool to_user,
+		   unsigned long offset, unsigned long len);
 void __noreturn usercopy_abort(const char *name, const char *detail,
 			       bool to_user, unsigned long offset,
 			       unsigned long len);
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4392,7 +4392,9 @@ module_init(slab_proc_init);
 
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
- * Rejects objects that are incorrectly sized.
+ * Rejects incorrectly sized objects and objects that are to be copied
+ * to/from userspace but do not fall entirely within the containing slab
+ * cache's usercopy region.
  *
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
@@ -4412,10 +4414,24 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	/* Find offset within object. */
 	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
 
-	/* Allow address range falling entirely within object size. */
-	if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+	/* Allow address range falling entirely within usercopy region. */
+	if (offset >= cachep->useroffset &&
+	    offset - cachep->useroffset <= cachep->usersize &&
+	    n <= cachep->useroffset - offset + cachep->usersize)
 		return;
 
+	/*
+	 * If the copy is still within the allocated object, produce
+	 * a warning instead of rejecting the copy. This is intended
+	 * to be a temporary method to find any missing usercopy
+	 * whitelists.
+	 */
+	if (offset <= cachep->object_size &&
+	    n <= cachep->object_size - offset) {
+		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
+		return;
+	}
+
 	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3813,7 +3813,9 @@ EXPORT_SYMBOL(__kmalloc_node);
 
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
- * Rejects objects that are incorrectly sized.
+ * Rejects incorrectly sized objects and objects that are to be copied
+ * to/from userspace but do not fall entirely within the containing slab
+ * cache's usercopy region.
  *
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
@@ -3827,7 +3829,6 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 
 	/* Find object and usable object size. */
 	s = page->slab_cache;
-	object_size = slab_ksize(s);
 
 	/* Reject impossible pointers. */
 	if (ptr < page_address(page))
@@ -3845,10 +3846,24 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 		offset -= s->red_left_pad;
 	}
 
-	/* Allow address range falling entirely within object size. */
-	if (offset <= object_size && n <= object_size - offset)
+	/* Allow address range falling entirely within usercopy region. */
+	if (offset >= s->useroffset &&
+	    offset - s->useroffset <= s->usersize &&
+	    n <= s->useroffset - offset + s->usersize)
 		return;
 
+	/*
+	 * If the copy is still within the allocated object, produce
+	 * a warning instead of rejecting the copy. This is intended
+	 * to be a temporary method to find any missing usercopy
+	 * whitelists.
+	 */
+	object_size = slab_ksize(s);
+	if (offset <= object_size && n <= object_size - offset) {
+		usercopy_warn("SLUB object", s->name, to_user, offset, n);
+		return;
+	}
+
 	usercopy_abort("SLUB object", s->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
diff --git a/mm/usercopy.c b/mm/usercopy.c
index a562dd0..e9e9325 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -59,13 +59,28 @@ static noinline int check_stack_object(const void *obj, unsigned long len)
 }
 
 /*
- * If this function is reached, then CONFIG_HARDENED_USERCOPY has found an
- * unexpected state during a copy_from_user() or copy_to_user() call.
+ * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
+ * an unexpected state during a copy_from_user() or copy_to_user() call.
  * There are several checks being performed on the buffer by the
  * __check_object_size() function. Normal stack buffer usage should never
  * trip the checks, and kernel text addressing will always trip the check.
- * For cache objects, copies must be within the object size.
+ * For cache objects, it is checking that only the whitelisted range of
+ * bytes for a given cache is being accessed (via the cache's usersize and
+ * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
+ * kmem_cache_create_usercopy() function to create the cache (and
+ * carefully audit the whitelist range).
  */
+void usercopy_warn(const char *name, const char *detail, bool to_user,
+		   unsigned long offset, unsigned long len)
+{
+	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
+		 to_user ? "exposure" : "overwrite",
+		 to_user ? "from" : "to",
+		 name ? : "unknown?!",
+		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
+		 offset, len);
+}
+
 void __noreturn usercopy_abort(const char *name, const char *detail,
 			       bool to_user, unsigned long offset,
 			       unsigned long len)
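For reference, below is a minimal sketch (not part of this patch) of how a cache owner declares the whitelist that these checks enforce, using the kmem_cache_create_usercopy() helper referred to in the mm/usercopy.c comment above. The struct, cache, and function names here are hypothetical, and the argument order is assumed to match the prototype in include/linux/slab.h from this series. Only the 'data' member is whitelisted, so a copy confined to it passes __check_heap_object() silently, a copy elsewhere inside the object now only warns (per this patch), and a copy reaching past the object still aborts.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

/*
 * Hypothetical example: only foo.data is ever handed to
 * copy_to_user()/copy_from_user(), so only that region is
 * whitelisted via useroffset/usersize.
 */
struct foo {
	spinlock_t lock;	/* kernel-internal, never copied to userspace */
	char data[128];		/* the only field exposed to userspace */
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create_usercopy("foo_cache",
			sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
			offsetof(struct foo, data),		/* useroffset */
			sizeof(((struct foo *)0)->data),	/* usersize */
			NULL);
	return foo_cachep ? 0 : -ENOMEM;
}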