author    | Heiko Carstens <heiko.carstens@de.ibm.com> | 2015-07-29 08:31:24 +0200
committer | Ingo Molnar <mingo@kernel.org>             | 2015-08-03 11:34:17 +0200
commit    | ed79e946732e5311934d7f404b3b4e702e45cb97
tree      | 707e84f82e9b7f07c9638671ccbb85807fdcdfaf /arch/s390/lib
parent    | 3bbfafb77a06327fa1bc9f19bc55b5c558475091
s390/uaccess, locking/static_keys: employ static_branch_likely()
Use the new static_branch_likely() primitive to make sure that the
most likely case is executed without taking an unconditional branch.
This wasn't possible with the old jump label primitives.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20150729064600.GB3953@osiris
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/s390/lib')
-rw-r--r-- | arch/s390/lib/uaccess.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 4614d41..93cb1d0 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,7 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
 
-static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+static DEFINE_STATIC_KEY_FALSE(have_mvcos);
 
 static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
 						 unsigned long size)
@@ -104,7 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_from_user_mvcos(to, from, n);
 	return copy_from_user_mvcp(to, from, n);
 }
@@ -177,7 +177,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_to_user_mvcos(to, from, n);
 	return copy_to_user_mvcs(to, from, n);
 }
@@ -240,7 +240,7 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
 
 unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_in_user_mvcos(to, from, n);
 	return copy_in_user_mvc(to, from, n);
 }
@@ -312,7 +312,7 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
 
 unsigned long __clear_user(void __user *to, unsigned long size)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return clear_user_mvcos(to, size);
 	return clear_user_xc(to, size);
 }
@@ -386,7 +386,7 @@ early_param("uaccess_primary", parse_uaccess_pt);
 static int __init uaccess_init(void)
 {
 	if (!uaccess_primary && test_facility(27))
-		static_key_slow_inc(&have_mvcos);
+		static_branch_enable(&have_mvcos);
 	return 0;
 }
 early_initcall(uaccess_init);
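For readers unfamiliar with the API the patch switches to, the sketch below illustrates the same pattern: a key defined false by default, queried with static_branch_likely() on the hot path, and flipped once at boot. This is a minimal, illustrative example assuming a kernel build environment; the names have_feature, do_copy(), copy_fast(), copy_fallback() and feature_available are hypothetical stand-ins, not code from this patch.

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/string.h>

/* Defaults to false: callers take the fallback until the key is enabled. */
static DEFINE_STATIC_KEY_FALSE(have_feature);

/* Hypothetical fast and fallback implementations, for illustration only. */
static unsigned long copy_fast(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

static unsigned long copy_fallback(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

unsigned long do_copy(void *dst, const void *src, unsigned long n)
{
	/*
	 * static_branch_likely() lays out the true side as straight-line
	 * code: while the key is disabled a jump redirects to the fallback,
	 * and enabling the key patches that jump into a nop, so the fast
	 * path then runs without an unconditional branch.
	 */
	if (static_branch_likely(&have_feature))
		return copy_fast(dst, src, n);
	return copy_fallback(dst, src, n);
}

/* Hypothetical stand-in for real feature detection; the patch uses test_facility(27). */
static bool feature_available = true;

static int __init feature_init(void)
{
	if (feature_available)
		static_branch_enable(&have_feature);	/* patch the branch once at boot */
	return 0;
}
early_initcall(feature_init);

Compared with the old static_key_false()/static_key_slow_inc() pair, the new primitives make the intended likely/unlikely code layout explicit at the call site, which is the point the commit message makes.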