-rw-r--r--  include/linux/mm_inline.h   | 27
-rw-r--r--  include/linux/page-flags.h  |  8
-rw-r--r--  mm/memory.c                 |  3
-rw-r--r--  mm/migrate.c                |  2
-rw-r--r--  mm/page_alloc.c             |  2
-rw-r--r--  mm/shmem.c                  |  1
-rw-r--r--  mm/swap_state.c             |  3
7 files changed, 44 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 2704729..96e9704 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,3 +1,28 @@
+#ifndef LINUX_MM_INLINE_H
+#define LINUX_MM_INLINE_H
+
+/**
+ * page_is_file_cache - should the page be on a file LRU or anon LRU?
+ * @page: the page to test
+ *
+ * Returns !0 if @page is a page cache page backed by a regular filesystem,
+ * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
+ * Used by functions that manipulate the LRU lists, to sort a page
+ * onto the right LRU list.
+ *
+ * We would like to get this info without a page flag, but the state
+ * needs to survive until the page is last deleted from the LRU, which
+ * could be as far down as __page_cache_release.
+ */
+static inline int page_is_file_cache(struct page *page)
+{
+ if (PageSwapBacked(page))
+ return 0;
+
+ /* The page is page cache backed by a normal filesystem. */
+ return 1;
+}
+
static inline void
add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
@@ -65,3 +90,5 @@ static inline enum lru_list page_lru(struct page *page)
return lru;
}
+
+#endif
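
For orientation, here is a minimal sketch of how LRU code might consume page_is_file_cache() to sort a page onto the right list. The LRU_INACTIVE_FILE/LRU_INACTIVE_ANON names are assumptions borrowed from the split-LRU series this patch feeds into; they are not part of this diff.

/*
 * Illustrative sketch, not part of this patch: pick a file LRU for
 * ordinary page cache pages and an anon LRU for swap-backed ones.
 * The LRU_* names are assumed from the split-LRU work.
 */
static inline enum lru_list page_lru_base(struct page *page)
{
	if (page_is_file_cache(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}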
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c74d3e8..57b688c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -93,6 +93,7 @@ enum pageflags {
PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_buddy, /* Page is free, on buddy lists */
+ PG_swapbacked, /* Page is backed by RAM/swap */
#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
PG_uncached, /* Page has been mapped as uncached */
#endif
@@ -176,6 +177,7 @@ PAGEFLAG(SavePinned, savepinned); /* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
__SETPAGEFLAG(Private, private)
+PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
__PAGEFLAG(SlobPage, slob_page)
__PAGEFLAG(SlobFree, slob_free)
@@ -334,7 +336,8 @@ static inline void __ClearPageTail(struct page *page)
* Flags checked in bad_page(). Pages on the free list should not have
* these flags set. If they are, there is a problem.
*/
-#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | 1 << PG_reclaim | 1 << PG_dirty)
+#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | \
+ 1 << PG_reclaim | 1 << PG_dirty | 1 << PG_swapbacked)
/*
* Flags checked when a page is freed. Pages being freed should not have
@@ -347,7 +350,8 @@ static inline void __ClearPageTail(struct page *page)
* Pages being prepped should not have these flags set. If they are, there
* is a problem.
*/
-#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | 1 << PG_reserved | 1 << PG_dirty)
+#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | \
+ 1 << PG_reserved | 1 << PG_dirty | 1 << PG_swapbacked)
#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */
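
For readers unfamiliar with the PAGEFLAG() macro family, the single PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) line expands, roughly, to the accessors below; this is a paraphrase of the macro machinery of this era, not text from the diff.

/* Approximate expansion of the PAGEFLAG/__CLEARPAGEFLAG line above: */
static inline int PageSwapBacked(struct page *page)
	{ return test_bit(PG_swapbacked, &page->flags); }
static inline void SetPageSwapBacked(struct page *page)
	{ set_bit(PG_swapbacked, &page->flags); }
static inline void ClearPageSwapBacked(struct page *page)
	{ clear_bit(PG_swapbacked, &page->flags); }
static inline void __ClearPageSwapBacked(struct page *page)
	{ __clear_bit(PG_swapbacked, &page->flags); }	/* non-atomic */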
diff --git a/mm/memory.c b/mm/memory.c
index 1002f47..7512933 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1888,6 +1888,7 @@ gotten:
ptep_clear_flush_notify(vma, address, page_table);
set_pte_at(mm, address, page_table, entry);
update_mmu_cache(vma, address, entry);
+ SetPageSwapBacked(new_page);
lru_cache_add_active(new_page);
page_add_new_anon_rmap(new_page, vma, address);
@@ -2382,6 +2383,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
inc_mm_counter(mm, anon_rss);
+ SetPageSwapBacked(page);
lru_cache_add_active(page);
page_add_new_anon_rmap(page, vma, address);
set_pte_at(mm, address, page_table, entry);
@@ -2523,6 +2525,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
set_pte_at(mm, address, page_table, entry);
if (anon) {
inc_mm_counter(mm, anon_rss);
+ SetPageSwapBacked(page);
lru_cache_add_active(page);
page_add_new_anon_rmap(page, vma, address);
} else {
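
All three fault paths touched above repeat the same two-step ordering. A hypothetical helper (the name is ours, not in the patch) makes the invariant explicit: a new anonymous page is marked swap backed before the LRU ever sees it, so page_is_file_cache() classifies it correctly from the first lookup.

/* Hypothetical helper, not in this patch: the ordering repeated at
 * the three call sites above. */
static void lru_cache_add_new_anon(struct page *page)
{
	SetPageSwapBacked(page);	/* must precede LRU insertion */
	lru_cache_add_active(page);
}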
diff --git a/mm/migrate.c b/mm/migrate.c
index ad15b5e..c073274 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -572,6 +572,8 @@ static int move_to_new_page(struct page *newpage, struct page *page)
/* Prepare mapping for the new page.*/
newpage->index = page->index;
newpage->mapping = page->mapping;
+ if (PageSwapBacked(page))
+ SetPageSwapBacked(newpage);
mapping = page_mapping(page);
if (!mapping)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ee7a96e..2099904 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -462,6 +462,8 @@ static inline int free_pages_check(struct page *page)
bad_page(page);
if (PageDirty(page))
__ClearPageDirty(page);
+ if (PageSwapBacked(page))
+ __ClearPageSwapBacked(page);
/*
* For now, we report if PG_reserved was found set, but do not
* clear it, and do not free the page. But we shall soon need
diff --git a/mm/shmem.c b/mm/shmem.c
index d87958a..fd421ed 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1367,6 +1367,7 @@ repeat:
error = -ENOMEM;
goto failed;
}
+ SetPageSwapBacked(filepage);
/* Precharge page while we can wait, compensate after */
error = mem_cgroup_cache_charge(filepage, current->mm,
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 797c383..7a3ece0 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -75,6 +75,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
BUG_ON(!PageLocked(page));
BUG_ON(PageSwapCache(page));
BUG_ON(PagePrivate(page));
+ BUG_ON(!PageSwapBacked(page));
error = radix_tree_preload(gfp_mask);
if (!error) {
page_cache_get(page);
@@ -303,6 +304,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* May fail (-ENOMEM) if radix-tree node allocation failed.
*/
set_page_locked(new_page);
+ SetPageSwapBacked(new_page);
err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
if (likely(!err)) {
/*
@@ -312,6 +314,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
swap_readpage(NULL, new_page);
return new_page;
}
+ ClearPageSwapBacked(new_page);
clear_page_locked(new_page);
swap_free(entry);
} while (err != -ENOMEM);
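
Taken together, these hunks establish a small contract around the swap cache: add_to_swap_cache() now asserts that the caller has already marked the page swap backed, and a caller that set the flag speculatively must clear it again on the failure path before the page can be freed or reused. A sketch of the caller side, assuming a locked page and a valid swp_entry_t as in read_swap_cache_async():

SetPageSwapBacked(page);		/* satisfy the new BUG_ON */
err = add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL);
if (err) {
	ClearPageSwapBacked(page);	/* unwind before the page escapes */
	clear_page_locked(page);
}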