Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig            |   6
-rw-r--r--   lib/Kconfig.debug      |   6
-rw-r--r--   lib/Kconfig.kmemcheck  |  91
-rw-r--r--   lib/Makefile           |   2
-rw-r--r--   lib/atomic64.c         | 175
-rw-r--r--   lib/kobject.c          |   7
6 files changed, 284 insertions, 3 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 9960be0..bb1326d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -194,4 +194,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 config NLATTR
 	bool
 
+#
+# Generic 64-bit atomic support is selected if needed
+#
+config GENERIC_ATOMIC64
+	bool
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 116a350..6b0c2d8a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
 
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
-	depends on DEBUG_KERNEL && SLAB
+	depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK
 
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
-	depends on SLUB && SLUB_DEBUG
+	depends on SLUB && SLUB_DEBUG && !KMEMCHECK
 	default n
 	help
 	  Boot with debugging on by default. SLUB boots by default with
@@ -996,3 +996,5 @@ config DMA_API_DEBUG
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.kmemcheck"
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 0000000..603c81b
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
+config HAVE_ARCH_KMEMCHECK
+	bool
+
+menuconfig KMEMCHECK
+	bool "kmemcheck: trap use of uninitialized memory"
+	depends on DEBUG_KERNEL
+	depends on !X86_USE_3DNOW
+	depends on SLUB || SLAB
+	depends on !CC_OPTIMIZE_FOR_SIZE
+	depends on !FUNCTION_TRACER
+	select FRAME_POINTER
+	select STACKTRACE
+	default n
+	help
+	  This option enables tracing of dynamically allocated kernel memory
+	  to see if memory is used before it has been given an initial value.
+	  Be aware that this requires half of your memory for bookkeeping and
+	  will insert extra code at *every* read and write to tracked memory,
+	  thus slowing down the kernel code (but user code is unaffected).
+
+	  The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
+	  or enable kmemcheck at boot-time. If the kernel is started with
+	  kmemcheck=0, the large memory and CPU overhead is not incurred.
+
+choice
+	prompt "kmemcheck: default mode at boot"
+	depends on KMEMCHECK
+	default KMEMCHECK_ONESHOT_BY_DEFAULT
+	help
+	  This option controls the default behaviour of kmemcheck when the
+	  kernel boots and no kmemcheck= parameter is given.
+
+config KMEMCHECK_DISABLED_BY_DEFAULT
+	bool "disabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ENABLED_BY_DEFAULT
+	bool "enabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ONESHOT_BY_DEFAULT
+	bool "one-shot"
+	depends on KMEMCHECK
+	help
+	  In one-shot mode, only the first error detected is reported before
+	  kmemcheck is disabled.
+
+endchoice
+
+config KMEMCHECK_QUEUE_SIZE
+	int "kmemcheck: error queue size"
+	depends on KMEMCHECK
+	default 64
+	help
+	  Select the maximum number of errors to store in the queue. Since
+	  errors can occur virtually anywhere and in any context, we need a
+	  temporary storage area which is guaranteed not to generate any
+	  other faults. The queue will be emptied as soon as a tasklet may
+	  be scheduled. If the queue is full, new error reports will be
+	  lost.
+
+config KMEMCHECK_SHADOW_COPY_SHIFT
+	int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
+	depends on KMEMCHECK
+	range 2 8
+	default 5
+	help
+	  Select the number of shadow bytes to save along with each entry of
+	  the queue. These bytes indicate what parts of an allocation are
+	  initialized, uninitialized, etc. and will be displayed when an
+	  error is detected to help the debugging of a particular problem.
+
+config KMEMCHECK_PARTIAL_OK
+	bool "kmemcheck: allow partially uninitialized memory"
+	depends on KMEMCHECK
+	default y
+	help
+	  This option works around certain GCC optimizations that produce
+	  32-bit reads from 16-bit variables where the upper 16 bits are
+	  thrown away afterwards. This may of course also hide some real
+	  bugs.
+
+config KMEMCHECK_BITOPS_OK
+	bool "kmemcheck: allow bit-field manipulation"
+	depends on KMEMCHECK
+	default n
+	help
+	  This option silences warnings that would be generated for bit-field
+	  accesses where not all the bits are initialized at the same time.
+	  This may also hide some real bugs.
+
diff --git a/lib/Makefile b/lib/Makefile
index 34c5c0e..8e9bcf9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -95,6 +95,8 @@ obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
 
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
+obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+
 hostprogs-y	:= gen_crc32table
 
 clean-files	:= crc32table.h
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 0000000..c5e7255
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,175 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+
+/*
+ * We use a hashed array of spinlocks to provide exclusive access
+ * to each atomic64_t variable.  Since this is expected to be used on
+ * systems with small numbers of CPUs (<= 4 or so), we use a
+ * relatively small array of 16 spinlocks to avoid wasting too much
+ * memory on the spinlock array.
+ */
+#define NR_LOCKS	16
+
+/*
+ * Ensure each lock is in a separate cacheline.
+ */
+static union {
+	spinlock_t lock;
+	char pad[L1_CACHE_BYTES];
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
+
+static inline spinlock_t *lock_addr(const atomic64_t *v)
+{
+	unsigned long addr = (unsigned long) v;
+
+	addr >>= L1_CACHE_SHIFT;
+	addr ^= (addr >> 8) ^ (addr >> 16);
+	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
+}
+
+long long atomic64_read(const atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void atomic64_set(atomic64_t *v, long long i)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter = i;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+void atomic64_add(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_add_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void atomic64_sub(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_sub_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter - 1;
+	if (val >= 0)
+		v->counter = val;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	if (val == o)
+		v->counter = n;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_xchg(atomic64_t *v, long long new)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	v->counter = new;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	int ret = 1;
+
+	spin_lock_irqsave(lock, flags);
+	if (v->counter != u) {
+		v->counter += a;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return ret;
+}
+
+static int init_atomic64_lock(void)
+{
+	int i;
+
+	for (i = 0; i < NR_LOCKS; ++i)
+		spin_lock_init(&atomic64_lock[i].lock);
+	return 0;
+}
+
+pure_initcall(init_atomic64_lock);
diff --git a/lib/kobject.c b/lib/kobject.c
index bacf6fe..b512b74 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name,
 				struct kobject *parent_kobj)
 {
 	struct kset *kset;
+	int retval;
 
 	kset = kzalloc(sizeof(*kset), GFP_KERNEL);
 	if (!kset)
 		return NULL;
-	kobject_set_name(&kset->kobj, name);
+	retval = kobject_set_name(&kset->kobj, name);
+	if (retval) {
+		kfree(kset);
+		return NULL;
+	}
 	kset->uevent_ops = uevent_ops;
 	kset->kobj.parent = parent_kobj;
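
The kmemcheck help text above describes trapping reads of memory that was never given an initial value. A rough illustration only, not part of this patch (the struct, its fields and the foo_sum() helper are invented names): this is the class of access kmemcheck is meant to report.

/* Sketch only -- the kind of bug kmemcheck traps; not from this commit. */
#include <linux/slab.h>
#include <linux/errno.h>

struct foo {				/* hypothetical structure */
	int a;
	int b;
};

int foo_sum(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
	int sum;

	if (!f)
		return -ENOMEM;
	f->a = 1;			/* f->b is never written */
	sum = f->a + f->b;		/* kmemcheck flags this read of
					 * uninitialized tracked memory */
	kfree(f);
	return sum;
}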
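
For the new lib/atomic64.c, a minimal usage sketch, assuming an architecture that selects CONFIG_GENERIC_ATOMIC64: callers keep using the ordinary atomic64_* operations, and the spinlock-backed versions added above are what actually run. The counter total_bytes and account_bytes() are made-up names for illustration.

/* Sketch only -- not part of this commit. */
#include <linux/kernel.h>
#include <asm/atomic.h>

static atomic64_t total_bytes;		/* hypothetical 64-bit counter */

void account_bytes(long long n)
{
	long long now;

	/*
	 * With CONFIG_GENERIC_ATOMIC64 this ends up in the
	 * spin_lock_irqsave()-protected atomic64_add_return() from
	 * lib/atomic64.c, so the 64-bit update stays atomic even on
	 * 32-bit CPUs without native 64-bit atomic instructions.
	 */
	now = atomic64_add_return(n, &total_bytes);

	if (now < 0)
		printk(KERN_WARNING "account_bytes: counter wrapped\n");
}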
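
The lib/kobject.c hunk makes kset_create() free the kset and return NULL when kobject_set_name() fails. From a caller's point of view nothing changes: kset_create_and_add(), the public wrapper around kset_create(), still just returns NULL on failure, as in this sketch (example_kset and example_init() are invented names).

/* Sketch only -- not part of this commit. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kobject.h>

static struct kset *example_kset;	/* hypothetical kset */

static int __init example_init(void)
{
	/* NULL covers both kzalloc() failure and, with this patch,
	 * a failed kobject_set_name() inside kset_create(). */
	example_kset = kset_create_and_add("example", NULL, kernel_kobj);
	if (!example_kset)
		return -ENOMEM;
	return 0;
}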