author	Peter Zijlstra <peterz@infradead.org>	2015-11-23 15:23:55 +0100
committer	Ingo Molnar <mingo@kernel.org>	2016-09-22 15:25:54 +0200
commit	259d69b7f056bc9a543c7d184e791ef6c2775081 (patch)
tree	e7db40679995eec5e61bc105e095db645d4e5f04 /include/linux/percpu-rwsem.h
parent	7c3f654d8e18942295eeda42f7d75494443980e0 (diff)
locking/percpu-rwsem: Add down_read_preempt_disable()
Provide a down_read()/up_read() variant that keeps preemption disabled
over the whole thing, when possible.

This avoids a needless preemption point for constructs such as:

	percpu_down_read(&global_rwsem);
	spin_lock(&lock);
	...
	spin_unlock(&lock);
	percpu_up_read(&global_rwsem);

which perturbs timings. In particular, it was found to cure a
performance regression in a follow-up patch in fs/locks.c.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
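[Editor's note: a minimal usage sketch, not part of the patch. It reuses the
changelog's global_rwsem/lock names; frob_under_locks() is a hypothetical
caller, modeled on the fs/locks.c pattern the changelog mentions.]

	#include <linux/percpu-rwsem.h>
	#include <linux/spinlock.h>

	DEFINE_STATIC_PERCPU_RWSEM(global_rwsem);
	static DEFINE_SPINLOCK(lock);

	static void frob_under_locks(void)
	{
		/*
		 * Acquire the per-CPU rwsem for reading and return with
		 * preemption disabled, so there is no preemption point
		 * between the rwsem and the spinlock. The body must not
		 * sleep until preemption is re-enabled on the way out.
		 */
		percpu_down_read_preempt_disable(&global_rwsem);
		spin_lock(&lock);

		/* ... non-sleeping critical section ... */

		spin_unlock(&lock);
		/* Drops the reader and re-enables preemption in one go. */
		percpu_up_read_preempt_enable(&global_rwsem);
	}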
Diffstat (limited to 'include/linux/percpu-rwsem.h')
-rw-r--r--	include/linux/percpu-rwsem.h | 24 ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index d402d39..5b2e615 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -28,7 +28,7 @@ static struct percpu_rw_semaphore name = {	\
 extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
 extern void __percpu_up_read(struct percpu_rw_semaphore *);
 
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
 {
 	might_sleep();
 
@@ -46,13 +46,19 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 	__this_cpu_inc(*sem->read_count);
 	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
-	preempt_enable();
+	barrier();
 	/*
-	 * The barrier() from preempt_enable() prevents the compiler from
+	 * The barrier() prevents the compiler from
 	 * bleeding the critical section out.
 	 */
 }
 
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+	percpu_down_read_preempt_disable(sem);
+	preempt_enable();
+}
+
 static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
 	int ret = 1;
@@ -76,13 +82,13 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	return ret;
 }
 
-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
 {
 	/*
-	 * The barrier() in preempt_disable() prevents the compiler from
+	 * The barrier() prevents the compiler from
 	 * bleeding the critical section out.
 	 */
-	preempt_disable();
+	barrier();
 	/*
 	 * Same as in percpu_down_read().
 	 */
@@ -95,6 +101,12 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
 }
 
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+	preempt_disable();
+	percpu_up_read_preempt_enable(sem);
+}
+
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);
 
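[Editor's note: the writer-side entry points declared at the end of the
excerpt are untouched by this patch. For contrast with the reader sketch
above, a minimal illustrative writer section, against the same hypothetical
global_rwsem, would be:]

	percpu_down_write(&global_rwsem);	/* excludes all readers; may sleep */
	/* ... modify data protected by global_rwsem ... */
	percpu_up_write(&global_rwsem);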