author		Mikulas Patocka <mpatocka@redhat.com>	2012-11-27 22:59:52 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-11-28 07:33:50 -0800
commit		4b05a1c74d1cfae37cf6ff293ee928350f031418 (patch)
tree		1b5c9507f5622fbf1affe4fc4e07ed0b60c3413e
parent		e23739b4ade80a3a7f87198f008f6c44a7cbc9fd (diff)
percpu-rwsem: use synchronize_sched_expedited
Use synchronize_sched_expedited() instead of synchronize_sched()
to improve mount speed.

This patch improves mount time from 0.500s to 0.013s for Jeff's
test-case.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Reported-and-tested-by: Jeff Chua <jeff.chua.linux@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
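Why this helps: percpu_down_write() must wait out a full RCU-sched grace period so that every reader has either seen p->locked set or has already published itself in the per-cpu counters. synchronize_sched() waits for that grace period to expire naturally, which can take many milliseconds per call; synchronize_sched_expedited() forces one through quickly at the cost of disturbing every CPU. For context, a sketch of the reader fast path that pairs with this wait, reconstructed from the same header as it stood around this commit (it is not part of the hunks below):

/*
 * Sketch of the reader fast path paired with the writer's grace-period
 * wait; reconstructed from this era of include/linux/percpu-rwsem.h,
 * not taken verbatim from the hunks below.
 * Needs <linux/percpu.h>, <linux/rcupdate.h>, <linux/mutex.h>.
 */
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
	rcu_read_lock_sched();
	if (unlikely(p->locked)) {
		/* Slow path: a writer is active, serialize on the mutex. */
		rcu_read_unlock_sched();
		mutex_lock(&p->mtx);
		this_cpu_inc(*p->counters);
		mutex_unlock(&p->mtx);
		return;
	}
	/* Fast path: publish this reader via its per-cpu counter. */
	this_cpu_inc(*p->counters);
	rcu_read_unlock_sched();
	light_mb(); /* A, between read of p->locked and read of data, paired with D */
}

Because the reader fast path stays inside a single rcu_read_lock_sched() section, the writer only needs one grace-period wait to know that all later readers will observe p->locked; making that wait expedited is what removes the latency from mount.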
-rw-r--r--	include/linux/percpu-rwsem.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 250a4ac..bd1e860 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -13,7 +13,7 @@ struct percpu_rw_semaphore {
 };
 
 #define light_mb()	barrier()
-#define heavy_mb()	synchronize_sched()
+#define heavy_mb()	synchronize_sched_expedited()
 
 static inline void percpu_down_read(struct percpu_rw_semaphore *p)
 {
@@ -51,7 +51,7 @@ static inline void percpu_down_write(struct percpu_rw_semaphore *p)
 {
 	mutex_lock(&p->mtx);
 	p->locked = true;
-	synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */
+	synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
 	while (__percpu_count(p->counters))
 		msleep(1);
 	heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
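Note that heavy_mb() also sits on the unlock side, so the redefinition above speeds up percpu_up_write() as well, not just the grace-period wait in percpu_down_write(). For context, a sketch of that counterpart as it stood in the same header (not part of the hunks above):

static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
	heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
	p->locked = false;
	mutex_unlock(&p->mtx);
}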