Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug        8
-rw-r--r--   lib/Makefile             2
-rw-r--r--   lib/ioremap.c           39
-rw-r--r--   lib/iov_iter.c          69
-rw-r--r--   lib/kobject.c            5
-rw-r--r--   lib/radix-tree.c         4
-rw-r--r--   lib/refcount.c         183
-rw-r--r--   lib/sbitmap.c           75
-rw-r--r--   lib/syscall.c            1
-rw-r--r--   lib/test_kasan.c        10
-rw-r--r--   lib/test_user_copy.c     1
-rw-r--r--   lib/usercopy.c          26
12 files changed, 350 insertions, 73 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 97d62c2..2598a32 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -356,7 +356,7 @@ config FRAME_POINTER
bool "Compile the kernel with frame pointers"
depends on DEBUG_KERNEL && \
(CRIS || M68K || FRV || UML || \
- AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \
+ SUPERH || BLACKFIN || MN10300 || METAG) || \
ARCH_WANT_FRAME_POINTERS
default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
help
@@ -1103,9 +1103,6 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.txt.
-config PROVE_LOCKING_SMALL
- bool
-
config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1111,9 @@ config LOCKDEP
select KALLSYMS
select KALLSYMS_ALL
+config LOCKDEP_SMALL
+ bool
+
config LOCK_STAT
bool "Lock usage statistics"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index 320ac46a..b47cf97 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -41,7 +41,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
- once.o refcount.o
+ once.o refcount.o usercopy.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += hexdump.o
diff --git a/lib/ioremap.c b/lib/ioremap.c
index a3e14ce..4bb3020 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -14,6 +14,7 @@
#include <asm/pgtable.h>
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;
@@ -35,6 +36,11 @@ void __init ioremap_huge_init(void)
}
}
+static inline int ioremap_p4d_enabled(void)
+{
+ return ioremap_p4d_capable;
+}
+
static inline int ioremap_pud_enabled(void)
{
return ioremap_pud_capable;
@@ -46,6 +52,7 @@ static inline int ioremap_pmd_enabled(void)
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -94,14 +101,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
return 0;
}
-static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
+static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
phys_addr -= addr;
- pud = pud_alloc(&init_mm, pgd, addr);
+ pud = pud_alloc(&init_mm, p4d, addr);
if (!pud)
return -ENOMEM;
do {
@@ -120,6 +127,32 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
return 0;
}
+static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+{
+ p4d_t *p4d;
+ unsigned long next;
+
+ phys_addr -= addr;
+ p4d = p4d_alloc(&init_mm, pgd, addr);
+ if (!p4d)
+ return -ENOMEM;
+ do {
+ next = p4d_addr_end(addr, end);
+
+ if (ioremap_p4d_enabled() &&
+ ((next - addr) == P4D_SIZE) &&
+ IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
+ if (p4d_set_huge(p4d, phys_addr + addr, prot))
+ continue;
+ }
+
+ if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
+ return -ENOMEM;
+ } while (p4d++, addr = next, addr != end);
+ return 0;
+}
+
int ioremap_page_range(unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
@@ -135,7 +168,7 @@ int ioremap_page_range(unsigned long addr,
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
+ err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
if (err)
break;
} while (pgd++, addr = next, addr != end);
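A minimal caller-side sketch, not part of this diff: any ioremap() user reaches ioremap_page_range(), which after this change walks pgd -> p4d -> pud -> pmd -> pte and tries a huge mapping at each level the architecture reports as capable. The base address, length and register offset below are invented for illustration.

#include <linux/io.h>
#include <linux/types.h>

static void __iomem *example_regs;

static int example_map_device(phys_addr_t base, size_t len)
{
        /* ioremap() ends up in ioremap_page_range() for the chosen vmalloc range */
        example_regs = ioremap(base, len);
        if (!example_regs)
                return -ENOMEM;

        writel(0x1, example_regs + 0x04);       /* poke a hypothetical register */
        return 0;
}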
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e68604a..4952311 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
size_t count)
{
/* It will get better. Eventually... */
- if (segment_eq(get_fs(), KERNEL_DS)) {
+ if (uaccess_kernel()) {
direction |= ITER_KVEC;
i->type = direction;
i->kvec = (struct kvec *)iov;
@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
return 0;
}
iterate_and_advance(i, bytes, v,
- __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+ __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
if (unlikely(i->count < bytes))
return false;
iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+ if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len))
return false;
0;}),
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
}
EXPORT_SYMBOL(iov_iter_advance);
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+ if (!unroll)
+ return;
+ i->count += unroll;
+ if (unlikely(i->type & ITER_PIPE)) {
+ struct pipe_inode_info *pipe = i->pipe;
+ int idx = i->idx;
+ size_t off = i->iov_offset;
+ while (1) {
+ size_t n = off - pipe->bufs[idx].offset;
+ if (unroll < n) {
+ off -= unroll;
+ break;
+ }
+ unroll -= n;
+ if (!unroll && idx == i->start_idx) {
+ off = 0;
+ break;
+ }
+ if (!idx--)
+ idx = pipe->buffers - 1;
+ off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+ }
+ i->iov_offset = off;
+ i->idx = idx;
+ pipe_truncate(i);
+ return;
+ }
+ if (unroll <= i->iov_offset) {
+ i->iov_offset -= unroll;
+ return;
+ }
+ unroll -= i->iov_offset;
+ if (i->type & ITER_BVEC) {
+ const struct bio_vec *bvec = i->bvec;
+ while (1) {
+ size_t n = (--bvec)->bv_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->bvec = bvec;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ } else { /* same logics for iovec and kvec */
+ const struct iovec *iov = i->iov;
+ while (1) {
+ size_t n = (--iov)->iov_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->iov = iov;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ }
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
/*
* Return the count of just the current iov_iter segment.
*/
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
i->iov_offset = 0;
i->count = count;
+ i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);
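A hedged usage sketch for the new iov_iter_revert() export; the example_fill() helper is invented, not from this patch. copy_from_iter() advances the iterator by however many bytes it copied, so a caller that fails part-way can hand those bytes back and leave the iterator untouched for its own caller.

#include <linux/uio.h>

static ssize_t example_fill(void *dst, size_t len, struct iov_iter *from)
{
        size_t copied = copy_from_iter(dst, len, from);

        if (copied < len) {
                /* short copy: undo the advance before reporting the error */
                iov_iter_revert(from, copied);
                return -EFAULT;
        }
        return copied;
}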
diff --git a/lib/kobject.c b/lib/kobject.c
index 445dcae..763d70a 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -601,12 +601,15 @@ struct kobject *kobject_get(struct kobject *kobj)
}
EXPORT_SYMBOL(kobject_get);
-static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
{
+ if (!kobj)
+ return NULL;
if (!kref_get_unless_zero(&kobj->kref))
kobj = NULL;
return kobj;
}
+EXPORT_SYMBOL(kobject_get_unless_zero);
/*
* kobject_cleanup - free kobject resources.
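A hedged sketch of the kind of lookup the newly exported kobject_get_unless_zero() enables; the list, lock and example_find() name are assumptions, not part of this patch. A caller walking a list under a lock shared with the release path refuses to pin a kobject whose refcount already dropped to zero.

#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static struct kobject *example_find(struct list_head *head, spinlock_t *lock)
{
        struct kobject *kobj, *found = NULL;

        spin_lock(lock);
        list_for_each_entry(kobj, head, entry) {
                /* NULL if a concurrent final kobject_put() already won */
                found = kobject_get_unless_zero(kobj);
                if (found)
                        break;
        }
        spin_unlock(lock);

        return found;
}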
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5ed506d..691a9ad 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2129,8 +2129,8 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
if (!bitmap)
return 0;
- bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
- kfree(bitmap);
+ if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
+ kfree(bitmap);
}
return 1;
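For context, a hedged sketch of the two-step IDA pattern that ida_pre_get() serves; example_ida and example_alloc_id() are invented names. The preload fills the per-cpu bitmap that the hunk above now frees only when another CPU has already installed one.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

static int example_alloc_id(void)
{
        int id, err;

        do {
                /* preallocate the per-cpu bitmap ida_get_new() may need */
                if (!ida_pre_get(&example_ida, GFP_KERNEL))
                        return -ENOMEM;
                err = ida_get_new(&example_ida, &id);
        } while (err == -EAGAIN);

        return err ? err : id;
}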
diff --git a/lib/refcount.c b/lib/refcount.c
index 1d33366..f42124c 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -37,11 +37,29 @@
#include <linux/refcount.h>
#include <linux/bug.h>
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
if (!val)
return false;
@@ -51,37 +69,54 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
new = val + i;
if (new < val)
new = UINT_MAX;
- old = atomic_cmpxchg_relaxed(&r->refs, val, new);
- if (old == val)
- break;
- val = old;
- }
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
- WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+ WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
return true;
}
EXPORT_SYMBOL_GPL(refcount_add_not_zero);
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
void refcount_add(unsigned int i, refcount_t *r)
{
- WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_add);
-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
*/
bool refcount_inc_not_zero(refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
new = val + 1;
if (!val)
@@ -90,63 +125,84 @@ bool refcount_inc_not_zero(refcount_t *r)
if (unlikely(!new))
return true;
- old = atomic_cmpxchg_relaxed(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
- WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+ WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
return true;
}
EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
*
* Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
*/
void refcount_inc(refcount_t *r)
{
- WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_inc);
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
if (unlikely(val == UINT_MAX))
return false;
new = val - i;
if (new > val) {
- WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
return false;
}
- old = atomic_cmpxchg_release(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
+ } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
return !new;
}
EXPORT_SYMBOL_GPL(refcount_sub_and_test);
-/*
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
* decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
*/
bool refcount_dec_and_test(refcount_t *r)
{
@@ -154,21 +210,26 @@ bool refcount_dec_and_test(refcount_t *r)
}
EXPORT_SYMBOL_GPL(refcount_dec_and_test);
-/*
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
* when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
-
void refcount_dec(refcount_t *r)
{
- WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+ WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL_GPL(refcount_dec);
-/*
+/**
+ * refcount_dec_if_one - decrement a refcount if it is 1
+ * @r: the refcount
+ *
* No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
* success thereof.
*
@@ -178,24 +239,33 @@ EXPORT_SYMBOL_GPL(refcount_dec);
* It can be used like a try-delete operator; this explicit case is provided
* and not cmpxchg in generic, because that would allow implementing unsafe
* operations.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
*/
bool refcount_dec_if_one(refcount_t *r)
{
- return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+ int val = 1;
+
+ return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL_GPL(refcount_dec_if_one);
-/*
+/**
+ * refcount_dec_not_one - decrement a refcount if it is not 1
+ * @r: the refcount
+ *
* No atomic_t counterpart, it decrements unless the value is 1, in which case
* it will return false.
*
* Was often done like: atomic_add_unless(&var, -1, 1)
+ *
+ * Return: true if the decrement operation was successful, false otherwise
*/
bool refcount_dec_not_one(refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
if (unlikely(val == UINT_MAX))
return true;
@@ -204,28 +274,31 @@ bool refcount_dec_not_one(refcount_t *r)
new = val - 1;
if (new > val) {
- WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
return true;
}
- old = atomic_cmpxchg_release(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
+ } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_not_one);
-/*
+/**
+ * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the mutex to be locked
+ *
* Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
* to decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
+ *
+ * Return: true and hold mutex if able to decrement refcount to 0, false
+ * otherwise
*/
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
@@ -242,13 +315,21 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
}
EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
-/*
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
* Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
* decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
*/
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
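A minimal usage sketch for the API documented above; struct example_obj, example_get() and example_put() are invented names, not from this patch. Lookups take a conditional reference, and the final put frees the object after an RCU grace period.

#include <linux/refcount.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
        refcount_t refs;
        struct rcu_head rcu;
};

/* Pin the object only if it is still live; provides no ordering by itself. */
static struct example_obj *example_get(struct example_obj *obj)
{
        return refcount_inc_not_zero(&obj->refs) ? obj : NULL;
}

/* Release ordering plus the control dependency order the free after the dec. */
static void example_put(struct example_obj *obj)
{
        if (refcount_dec_and_test(&obj->refs))
                kfree_rcu(obj, rcu);
}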
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 60e800e..80aa8d5 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -79,15 +79,15 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
-static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
- bool wrap)
+static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
+ unsigned int hint, bool wrap)
{
unsigned int orig_hint = hint;
int nr;
while (1) {
- nr = find_next_zero_bit(&word->word, word->depth, hint);
- if (unlikely(nr >= word->depth)) {
+ nr = find_next_zero_bit(word, depth, hint);
+ if (unlikely(nr >= depth)) {
/*
* We started with an offset, and we didn't reset the
* offset to 0 in a failure case, so start from 0 to
@@ -100,11 +100,11 @@ static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
return -1;
}
- if (!test_and_set_bit(nr, &word->word))
+ if (!test_and_set_bit(nr, word))
break;
hint = nr + 1;
- if (hint >= word->depth - 1)
+ if (hint >= depth - 1)
hint = 0;
}
@@ -119,7 +119,8 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
index = SB_NR_TO_INDEX(sb, alloc_hint);
for (i = 0; i < sb->map_nr; i++) {
- nr = __sbitmap_get_word(&sb->map[index],
+ nr = __sbitmap_get_word(&sb->map[index].word,
+ sb->map[index].depth,
SB_NR_TO_BIT(sb, alloc_hint),
!round_robin);
if (nr != -1) {
@@ -141,6 +142,37 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
}
EXPORT_SYMBOL_GPL(sbitmap_get);
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
+ unsigned long shallow_depth)
+{
+ unsigned int i, index;
+ int nr = -1;
+
+ index = SB_NR_TO_INDEX(sb, alloc_hint);
+
+ for (i = 0; i < sb->map_nr; i++) {
+ nr = __sbitmap_get_word(&sb->map[index].word,
+ min(sb->map[index].depth, shallow_depth),
+ SB_NR_TO_BIT(sb, alloc_hint), true);
+ if (nr != -1) {
+ nr += index << sb->shift;
+ break;
+ }
+
+ /* Jump to next index. */
+ index++;
+ alloc_hint = index << sb->shift;
+
+ if (index >= sb->map_nr) {
+ index = 0;
+ alloc_hint = 0;
+ }
+ }
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
+
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
unsigned int i;
@@ -342,6 +374,35 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
+int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth)
+{
+ unsigned int hint, depth;
+ int nr;
+
+ hint = this_cpu_read(*sbq->alloc_hint);
+ depth = READ_ONCE(sbq->sb.depth);
+ if (unlikely(hint >= depth)) {
+ hint = depth ? prandom_u32() % depth : 0;
+ this_cpu_write(*sbq->alloc_hint, hint);
+ }
+ nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
+
+ if (nr == -1) {
+ /* If the map is full, a hint won't do us much good. */
+ this_cpu_write(*sbq->alloc_hint, 0);
+ } else if (nr == hint || unlikely(sbq->round_robin)) {
+ /* Only update the hint if we used it. */
+ hint = nr + 1;
+ if (hint >= depth - 1)
+ hint = 0;
+ this_cpu_write(*sbq->alloc_hint, hint);
+ }
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
int i, wake_index;
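A hedged sketch of how the new shallow allocator might be called; the priority split and the depth limit of 4 are assumptions for illustration. Capping how deep into each word low-priority allocations may reach keeps part of the map free for more important requests.

#include <linux/sbitmap.h>

static int example_get_tag(struct sbitmap_queue *sbq, bool low_prio)
{
        /* low-priority requests may only use the first 4 bits of each word */
        if (low_prio)
                return __sbitmap_queue_get_shallow(sbq, 4);

        return __sbitmap_queue_get(sbq);
}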
diff --git a/lib/syscall.c b/lib/syscall.c
index 17d5ff5..2c6cd1b 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -12,6 +12,7 @@ static int collect_syscall(struct task_struct *target, long *callno,
if (!try_get_task_stack(target)) {
/* Task has no stack, so the task isn't in a syscall. */
+ *sp = *pc = 0;
*callno = -1;
return 0;
}
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 0b1d314..a25c976 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -20,6 +20,7 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/kasan.h>
/*
* Note: test functions are marked noinline so that their names appear in
@@ -474,6 +475,12 @@ static noinline void __init use_after_scope_test(void)
static int __init kmalloc_tests_init(void)
{
+ /*
+ * Temporarily enable multi-shot mode. Otherwise, we'd only get a
+ * report for the first case.
+ */
+ bool multishot = kasan_save_enable_multi_shot();
+
kmalloc_oob_right();
kmalloc_oob_left();
kmalloc_node_oob_right();
@@ -499,6 +506,9 @@ static int __init kmalloc_tests_init(void)
ksize_unpoisons_memory();
copy_user_test();
use_after_scope_test();
+
+ kasan_restore_multi_shot(multishot);
+
return -EAGAIN;
}
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 1a8d71a..4621db8 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,7 +31,6 @@
* their capability at compile-time, we just have to opt-out certain archs.
*/
#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
- !defined(CONFIG_AVR32) && \
!defined(CONFIG_BLACKFIN) && \
!defined(CONFIG_M32R) && \
!defined(CONFIG_M68K) && \
diff --git a/lib/usercopy.c b/lib/usercopy.c
new file mode 100644
index 0000000..1b6010a
--- /dev/null
+++ b/lib/usercopy.c
@@ -0,0 +1,26 @@
+#include <linux/uaccess.h>
+
+/* out-of-line parts */
+
+#ifndef INLINE_COPY_FROM_USER
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
+}
+EXPORT_SYMBOL(_copy_from_user);
+#endif
+
+#ifndef INLINE_COPY_TO_USER
+unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
+{
+ if (likely(access_ok(VERIFY_WRITE, to, n)))
+ n = raw_copy_to_user(to, from, n);
+ return n;
+}
+EXPORT_SYMBOL(_copy_to_user);
+#endif
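A caller-side sketch of the semantics implemented above; struct example_args and example_ioctl() are invented names. copy_from_user() returns the number of bytes it could not copy, and _copy_from_user() zero-fills the uncopied tail so a kernel buffer never carries stale data even if a caller mishandles the return value.

#include <linux/uaccess.h>
#include <linux/types.h>

struct example_args {
        __u32 flags;
        __u64 addr;
};

static long example_ioctl(void __user *uarg)
{
        struct example_args args;

        /* any non-zero return means part of the copy faulted */
        if (copy_from_user(&args, uarg, sizeof(args)))
                return -EFAULT;

        /* ... act on args ... */
        return 0;
}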