path: root/mm
author      Christoph Lameter <clameter@sgi.com>    2006-02-01 03:05:36 -0800
committer   Linus Torvalds <torvalds@g5.osdl.org>   2006-02-01 08:53:16 -0800
commit      b16664e44c54525be89dc07ad15a13b4eeec5634 (patch)
tree        1844d193009c1e55e3c26256feb14bc622ec9af3 /mm
parent      2a16e3f4b0c408b9e50297d2ec27e295d490267a (diff)
download    op-kernel-dev-b16664e44c54525be89dc07ad15a13b4eeec5634.zip
            op-kernel-dev-b16664e44c54525be89dc07ad15a13b4eeec5634.tar.gz
[PATCH] Direct Migration V9: PageSwapCache checks
Check for PageSwapCache after looking up and locking a swap page.

The page migration code may change a swap pte to point to a different page
under lock_page(). If that happens, the VM must retry the lookup operation
in the swap space to find the correct page number.

There are a couple of locations in the VM where a lock_page() is done on a
swap page. In these locations we need to check afterwards whether the page
was migrated. If the page was migrated, the old page that was looked up
before has been freed and no longer has the PageSwapCache bit set.

Signed-off-by: Hirokazu Takahashi <taka@valinux.co.jp>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
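The check added at each of the three call sites follows the same pattern: look the page up, lock it, then verify it is still a swap-cache page, and go back to the lookup if migration pulled it out from underneath the caller. Below is a minimal userspace sketch of that retry loop, assuming invented stand-ins (fake_page, lookup_swap_cache_stub(), release_stub()) for the kernel's lookup_swap_cache(), lock_page(), PageSwapCache() and page_cache_release(); it illustrates the lock-then-revalidate idea only and is not kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_page {
	bool swapcache;		/* models the PageSwapCache() flag */
	bool locked;		/* models lock_page()/unlock_page() */
	int  refcount;		/* models page_cache_release() refcounting */
};

/*
 * Pretend swap-cache lookup: the first call hands back a page that
 * "migration" has already removed from the swap cache, the second a
 * valid one.  A stand-in, not the kernel's lookup_swap_cache().
 */
static struct fake_page *lookup_swap_cache_stub(void)
{
	static int calls;
	struct fake_page *p = calloc(1, sizeof(*p));

	p->refcount = 1;
	p->swapcache = (calls++ > 0);	/* stale on the first attempt */
	return p;
}

static void release_stub(struct fake_page *p)	/* page_cache_release() stand-in */
{
	if (--p->refcount == 0)
		free(p);
}

int main(void)
{
	struct fake_page *page;

again:
	page = lookup_swap_cache_stub();
	page->locked = true;			/* lock_page(page) */

	if (!page->swapcache) {
		/* Page migration has occurred: drop the stale page and retry */
		page->locked = false;		/* unlock_page(page) */
		release_stub(page);		/* page_cache_release(page) */
		printf("stale page, retrying the swap-cache lookup\n");
		goto again;
	}

	printf("valid swap-cache page, fault handling can proceed\n");
	page->locked = false;
	release_stub(page);
	return 0;
}

Built with any C99 compiler, the sketch takes the retry branch exactly once before the second, valid lookup succeeds, mirroring how the patched kernel paths loop back to their lookup labels.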
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c    | 7
-rw-r--r--  mm/shmem.c     | 8
-rw-r--r--  mm/swapfile.c  | 7
3 files changed, 22 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 7a11ddd..2bee1f2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1871,6 +1871,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out;
 	entry = pte_to_swp_entry(orig_pte);
+again:
 	page = lookup_swap_cache(entry);
 	if (!page) {
 		swapin_readahead(entry, address, vma);
@@ -1894,6 +1895,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	mark_page_accessed(page);
 	lock_page(page);
+	if (!PageSwapCache(page)) {
+		/* Page migration has occurred */
+		unlock_page(page);
+		page_cache_release(page);
+		goto again;
+	}
 	/*
 	 * Back out if somebody else already faulted in this pte.
diff --git a/mm/shmem.c b/mm/shmem.c
index ce501bc..f7ac7b8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1028,6 +1028,14 @@ repeat:
 			page_cache_release(swappage);
 			goto repeat;
 		}
+		if (!PageSwapCache(swappage)) {
+			/* Page migration has occurred */
+			shmem_swp_unmap(entry);
+			spin_unlock(&info->lock);
+			unlock_page(swappage);
+			page_cache_release(swappage);
+			goto repeat;
+		}
 		if (PageWriteback(swappage)) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f1e69c3..9678182 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -646,6 +646,7 @@ static int try_to_unuse(unsigned int type)
 		 */
 		swap_map = &si->swap_map[i];
 		entry = swp_entry(type, i);
+again:
 		page = read_swap_cache_async(entry, NULL, 0);
 		if (!page) {
 			/*
@@ -680,6 +681,12 @@ static int try_to_unuse(unsigned int type)
 		wait_on_page_locked(page);
 		wait_on_page_writeback(page);
 		lock_page(page);
+		if (!PageSwapCache(page)) {
+			/* Page migration has occurred */
+			unlock_page(page);
+			page_cache_release(page);
+			goto again;
+		}
 		wait_on_page_writeback(page);
 		/*