author | Ingo Molnar <mingo@kernel.org> | 2013-10-09 12:36:13 +0200
committer | Ingo Molnar <mingo@kernel.org> | 2013-10-09 12:36:13 +0200
commit | 37bf06375c90a42fe07b9bebdb07bc316ae5a0ce (patch)
tree | de572dd6d3955b0725001776a7b03796f99e1e8e /lib
parent | 6bfa687c19b7ab8adee03f0d43c197c2945dd869 (diff)
parent | d0e639c9e06d44e713170031fe05fb60ebe680af (diff)
Merge tag 'v3.12-rc4' into sched/core
Merge Linux v3.12-rc4 to fix a conflict and also to refresh the tree
before applying more scheduler patches.
Conflicts:
arch/avr32/include/asm/Kbuild
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/hexdump.c | 2
-rw-r--r-- | lib/kobject.c | 5
-rw-r--r-- | lib/lockref.c | 23
3 files changed, 23 insertions, 7 deletions
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3f0494c..8499c81 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -14,6 +14,8 @@
 
 const char hex_asc[] = "0123456789abcdef";
 EXPORT_SYMBOL(hex_asc);
+const char hex_asc_upper[] = "0123456789ABCDEF";
+EXPORT_SYMBOL(hex_asc_upper);
 
 /**
  * hex_to_bin - convert a hex digit to its real value
diff --git a/lib/kobject.c b/lib/kobject.c
index 9621751..669bf19 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
 
 bool kobj_ns_current_may_mount(enum kobj_ns_type type)
 {
-	bool may_mount = false;
-
-	if (type == KOBJ_NS_TYPE_NONE)
-		return true;
+	bool may_mount = true;
 
 	spin_lock(&kobj_ns_type_lock);
 	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
diff --git a/lib/lockref.c b/lib/lockref.c
index e2cd2c0..6f9d434 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
 /*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
+/*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
  */
@@ -14,12 +30,13 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg(&lockref->lock_count,			\
-					 old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
+						   old.lock_count,		\
+						   new.lock_count);		\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-		cpu_relax();							\
+		arch_mutex_cpu_relax();						\
 	}									\
 } while (0)
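The substantive change above is the lockref one: CMPXCHG_LOOP now uses cmpxchg64_relaxed(), falling back to the ordinary full-barrier cmpxchg64() when an architecture does not provide a relaxed variant, and spins with an overridable arch_mutex_cpu_relax() instead of cpu_relax(). The sketch below shows the same fallback-macro-plus-retry-loop shape in plain C11 atomics; it is only an illustration, and the names my_lockref, my_cmpxchg64_relaxed, my_lockref_get and the 32/32 packing of lock word and refcount are assumptions, not the kernel's real types.

```c
/*
 * Minimal user-space sketch (C11 atomics) of the pattern the lockref hunk
 * introduces: a 64-bit compare-and-exchange retry loop that prefers a
 * "relaxed" cmpxchg macro and falls back to a fully ordered one when no
 * cheaper variant was provided.  Names and layout here are illustrative,
 * not the kernel's.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Fallback, mirroring "#ifndef cmpxchg64_relaxed": if nothing cheaper was
 * predefined, use a fully ordered compare-and-exchange. */
#ifndef my_cmpxchg64_relaxed
# define my_cmpxchg64_relaxed(ptr, oldp, newval)                        \
        atomic_compare_exchange_weak_explicit((ptr), (oldp), (newval),  \
                                              memory_order_seq_cst,     \
                                              memory_order_seq_cst)
#endif

struct my_lockref {
        _Atomic uint64_t lock_count;  /* lock word and refcount packed in 64 bits */
};

/* Bump the (hypothetical) refcount held in the upper 32 bits. */
static void my_lockref_get(struct my_lockref *lr)
{
        uint64_t old = atomic_load_explicit(&lr->lock_count,
                                            memory_order_relaxed);
        for (;;) {
                uint64_t new = old + (1ULL << 32);

                /* On failure the CAS reloads 'old' with the current value,
                 * just as the lockref comment notes cmpxchg() does. */
                if (my_cmpxchg64_relaxed(&lr->lock_count, &old, new))
                        break;
                /* A real implementation would pause/relax the CPU here,
                 * which is the hook arch_mutex_cpu_relax() provides. */
        }
}

int main(void)
{
        struct my_lockref lr = { .lock_count = 0 };

        my_lockref_get(&lr);
        printf("refcount = %llu\n",
               (unsigned long long)(atomic_load(&lr.lock_count) >> 32));
        return 0;
}
```

Built with `cc -std=c11`, this prints `refcount = 1`. A build that knows relaxed ordering is sufficient would predefine my_cmpxchg64_relaxed with memory_order_relaxed before including the header, much as a weakly ordered architecture supplies its own cmpxchg64_relaxed to the kernel.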