author		Seth Jennings <sjenning@linux.vnet.ibm.com>	2013-04-29 15:08:34 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 15:54:38 -0700
commit		2f772e6cadf8ad8fca38927b17e6be028be669f5 (patch)
tree		5eac4cfd72c6e8e9b4d48ad7463cdf1a8b97f7b4 /mm
parent		e8420a8ece80b3fe810415ecf061d54ca7fab266 (diff)
download	op-kernel-dev-2f772e6cadf8ad8fca38927b17e6be028be669f5.zip
		op-kernel-dev-2f772e6cadf8ad8fca38927b17e6be028be669f5.tar.gz
mm: break up swap_writepage() for frontswap backends
swap_writepage() is currently where frontswap hooks into the swap write path to capture pages with the frontswap_store() function. However, if a frontswap backend wants to "resume" the writeback of a page to the swap device, it can't call swap_writepage() as the page will simply reenter the backend.

This patch separates swap_writepage() into a top and bottom half, the bottom half named __swap_writepage(), to allow a frontswap backend, like zswap, to resume writeback beyond the frontswap_store() hook.

__add_to_swap_cache() is also made non-static so that the page for which writeback is to be resumed can be added to the swap cache.

Signed-off-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Bob Liu <bob.liu@oracle.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
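For illustration only (not part of this commit): a minimal sketch of how a frontswap backend such as zswap might use the two helpers exposed here to resume writeback past the frontswap_store() hook. The function my_backend_writeback(), its locking assumptions, and the GFP flags are hypothetical; only __add_to_swap_cache() and __swap_writepage() come from this change.

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/radix-tree.h>

/*
 * Hypothetical backend-side helper (not in this patch).  Assumes
 * @page is locked, not yet in the swap cache, and already filled
 * with the data the backend captured for @entry.
 */
static int my_backend_writeback(struct page *page, swp_entry_t entry)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};
	int err;

	/* __add_to_swap_cache() expects the radix tree to be preloaded */
	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;
	err = __add_to_swap_cache(page, entry);
	radix_tree_preload_end();
	if (err)
		return err;

	SetPageUptodate(page);

	/*
	 * Calling swap_writepage() here would just re-enter the backend
	 * via frontswap_store(); __swap_writepage() skips that hook and
	 * submits the I/O directly.  It unlocks the page itself.
	 */
	return __swap_writepage(page, &wbc);
}

The split keeps the try_to_free_swap() and frontswap_store() checks in swap_writepage() for the normal reclaim path, while __swap_writepage() carries only the block/filesystem submission that a backend needs to reuse.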
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_io.c	14
-rw-r--r--	mm/swap_state.c	 2
2 files changed, 12 insertions, 4 deletions
diff --git a/mm/page_io.c b/mm/page_io.c
index 78eee32..8e6bcf1 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -185,9 +185,7 @@ bad_bmap:
  */
 int swap_writepage(struct page *page, struct writeback_control *wbc)
 {
-	struct bio *bio;
-	int ret = 0, rw = WRITE;
-	struct swap_info_struct *sis = page_swap_info(page);
+	int ret = 0;
 
 	if (try_to_free_swap(page)) {
 		unlock_page(page);
@@ -199,6 +197,16 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		end_page_writeback(page);
 		goto out;
 	}
+	ret = __swap_writepage(page, wbc);
+out:
+	return ret;
+}
+
+int __swap_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct bio *bio;
+	int ret = 0, rw = WRITE;
+	struct swap_info_struct *sis = page_swap_info(page);
 
 	if (sis->flags & SWP_FILE) {
 		struct kiocb kiocb;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 7efcf15..fe43fd5 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -78,7 +78,7 @@ void show_swap_cache_info(void)
  * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
+int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
 	int error;
 	struct address_space *address_space;