author		Nick Piggin <npiggin@suse.de>	2008-10-18 20:26:59 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-20 08:52:32 -0700
commit		8413ac9d8c9a1366a4f57880723126cd24e5a5c3 (patch)
tree		fcee6ff670dcfccf895a48e92d27f52902d34301
parent		a978d6f521063514812a7094dbe5036e056e4de3 (diff)
mm: page lock use lock bitops
trylock_page(), unlock_page() open and close a critical section. Hence,
we can use the lock bitops to get the desired memory ordering. Also, mark
trylock as likely to succeed (and remove the annotation from callers).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
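For context, a minimal sketch of the pattern this commit adopts, assuming the kernel's lock bitops API (test_and_set_bit_lock() has acquire semantics, clear_bit_unlock() has release semantics). The my_*() names and MY_LOCK_BIT are hypothetical, for illustration only:

#include <linux/bitops.h>
#include <linux/compiler.h>

#define MY_LOCK_BIT	0	/* hypothetical lock bit in a flags word */

/* Acquire: on success, nothing in the critical section can be reordered
 * before this test-and-set, so no explicit barrier is needed. */
static inline int my_trylock(unsigned long *flags)
{
	return likely(!test_and_set_bit_lock(MY_LOCK_BIT, flags));
}

/* Release: all stores in the critical section become visible before the
 * bit clears, replacing the old smp_mb__before_clear_bit() dance. */
static inline void my_unlock(unsigned long *flags)
{
	clear_bit_unlock(MY_LOCK_BIT, flags);
}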
-rw-r--r--	include/linux/pagemap.h	|  2 +-
-rw-r--r--	mm/filemap.c		| 13 +++++--------
-rw-r--r--	mm/swapfile.c		|  2 +-
3 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7334b2b..709742b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -311,7 +311,7 @@ static inline void __clear_page_locked(struct page *page)
static inline int trylock_page(struct page *page)
{
- return !test_and_set_bit(PG_locked, &page->flags);
+ return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
/*
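With the likely() hint now inside trylock_page(), callers no longer annotate the branch themselves (see the mm/swapfile.c hunk below). An illustrative caller, with a hypothetical helper name:

/* Hypothetical caller: take the lock opportunistically and back off on
 * contention. The branch hint lives inside trylock_page() itself. */
static int try_touch_page(struct page *page)
{
	if (!trylock_page(page))
		return -EAGAIN;
	/* ... operate on the locked page ... */
	unlock_page(page);
	return 0;
}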
diff --git a/mm/filemap.c b/mm/filemap.c
index a1ddd25..e1b23fd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -573,17 +573,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
 * mechanism between PageLocked pages and PageWriteback pages is shared.
* But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
*
- * The first mb is necessary to safely close the critical section opened by the
- * test_and_set_bit() to lock the page; the second mb is necessary to enforce
- * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
- * races with a parallel wait_on_page_locked()).
+ * The mb is necessary to enforce ordering between the clear_bit and the read
+ * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
*/
void unlock_page(struct page *page)
{
- smp_mb__before_clear_bit();
- if (!test_and_clear_bit(PG_locked, &page->flags))
- BUG();
- smp_mb__after_clear_bit();
+ VM_BUG_ON(!PageLocked(page));
+ clear_bit_unlock(PG_locked, &page->flags);
+ smp_mb__after_clear_bit();
wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
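Why a barrier survives the conversion: clear_bit_unlock() only orders the critical section before the clear (release); it does not order the clear before the subsequent read of the waitqueue in wake_up_page(). A sketch of the race the remaining smp_mb__after_clear_bit() prevents (both paths simplified, not the exact kernel code):

/*
 *   unlocker (unlock_page)          waiter (wait_on_page_locked)
 *   ----------------------          ----------------------------
 *   clear_bit_unlock(PG_locked)     add itself to the page waitqueue
 *   smp_mb__after_clear_bit()       full barrier in the wait path
 *   waitqueue empty? -> no wakeup   PageLocked still set? -> sleep
 *
 * Without the full barrier, the unlocker's read of the waitqueue could
 * be satisfied before its clear of PG_locked becomes visible: both
 * sides then take the "nothing to do" branch and the waiter never
 * wakes.
 */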
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2a97faf..90cb67a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -422,7 +422,7 @@ void free_swap_and_cache(swp_entry_t entry)
if (p) {
if (swap_entry_free(p, swp_offset(entry)) == 1) {
page = find_get_page(&swapper_space, entry.val);
- if (page && unlikely(!trylock_page(page))) {
+ if (page && !trylock_page(page)) {
page_cache_release(page);
page = NULL;
}