Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c    |   9
-rw-r--r--  mm/memcontrol.c |   2
-rw-r--r--  mm/memory.c     |  37
-rw-r--r--  mm/page_alloc.c |  20
-rw-r--r--  mm/shmem.c      |  53
-rw-r--r--  mm/vmscan.c     | 124
6 files changed, 137 insertions(+), 108 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea8c3a4..5f34bd8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2508,6 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct hstate *h = hstate_vma(vma);
int ret = VM_FAULT_SIGBUS;
+ int anon_rmap = 0;
pgoff_t idx;
unsigned long size;
struct page *page;
@@ -2562,14 +2563,13 @@ retry:
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
spin_unlock(&inode->i_lock);
- page_dup_rmap(page);
} else {
lock_page(page);
if (unlikely(anon_vma_prepare(vma))) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
}
- hugepage_add_new_anon_rmap(page, vma, address);
+ anon_rmap = 1;
}
} else {
/*
@@ -2582,7 +2582,6 @@ retry:
VM_FAULT_SET_HINDEX(h - hstates);
goto backout_unlocked;
}
- page_dup_rmap(page);
}
/*
@@ -2606,6 +2605,10 @@ retry:
if (!huge_pte_none(huge_ptep_get(ptep)))
goto backout;
+ if (anon_rmap)
+ hugepage_add_new_anon_rmap(page, vma, address);
+ else
+ page_dup_rmap(page);
new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
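The hugetlb hunk above moves the rmap calls from the allocation path down past the final huge_pte_none() recheck, recording only a flag until then. A minimal userspace sketch of that pattern (hypothetical names, not the kernel API): decide early, but defer the irreversible side effect until after the last check that can still fail, so the backout path has nothing to undo.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the rmap calls and the racy recheck. */
static bool slot_still_free(void) { return true; }   /* huge_pte_none() analogue */
static void add_new_anon_rmap(void) { puts("new anon rmap"); }
static void dup_rmap(void)          { puts("dup rmap"); }

static int map_page(bool is_anon)
{
        int anon_rmap = 0;

        if (is_anon)
                anon_rmap = 1;   /* record the decision only; no side effect yet */

        /* ... more setup that may still fail and back out ... */

        if (!slot_still_free())
                return -1;       /* backout: nothing to undo, rmap untouched */

        /* point of no return: apply the deferred side effect */
        if (anon_rmap)
                add_new_anon_rmap();
        else
                dup_rmap();
        return 0;
}

int main(void)
{
        return map_page(true);
}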
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c3688df..556859f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3247,7 +3247,7 @@ int mem_cgroup_prepare_migration(struct page *page,
ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
else
ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
- __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
+ __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
return ret;
}
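The memcontrol fix is one identifier: the charge must be committed against newpage, the migration target, not the old page. A toy illustration of that bug class (hypothetical struct, not memcg internals): when state migrates between two objects, the bookkeeping has to land on the destination.

#include <assert.h>

struct page_acct { int charged; };

/* Hypothetical commit helper: attaches the charge to one page. */
static void commit_charge(struct page_acct *pg) { pg->charged = 1; }

int main(void)
{
        struct page_acct page = {0}, newpage = {0};

        /* Before the fix the charge landed on the old page:
         *   commit_charge(&page);    wrong target */
        commit_charge(&newpage);      /* the fix: charge the migration target */

        assert(newpage.charged && !page.charged);
        return 0;
}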
diff --git a/mm/memory.c b/mm/memory.c
index 5e30583..fa2f04e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -878,15 +878,24 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
}
if (likely(!non_swap_entry(entry)))
rss[MM_SWAPENTS]++;
- else if (is_write_migration_entry(entry) &&
- is_cow_mapping(vm_flags)) {
- /*
- * COW mappings require pages in both parent
- * and child to be set to read.
- */
- make_migration_entry_read(&entry);
- pte = swp_entry_to_pte(entry);
- set_pte_at(src_mm, addr, src_pte, pte);
+ else if (is_migration_entry(entry)) {
+ page = migration_entry_to_page(entry);
+
+ if (PageAnon(page))
+ rss[MM_ANONPAGES]++;
+ else
+ rss[MM_FILEPAGES]++;
+
+ if (is_write_migration_entry(entry) &&
+ is_cow_mapping(vm_flags)) {
+ /*
+ * COW mappings require pages in both
+ * parent and child to be set to read.
+ */
+ make_migration_entry_read(&entry);
+ pte = swp_entry_to_pte(entry);
+ set_pte_at(src_mm, addr, src_pte, pte);
+ }
}
}
goto out_set_pte;
@@ -1191,6 +1200,16 @@ again:
if (!non_swap_entry(entry))
rss[MM_SWAPENTS]--;
+ else if (is_migration_entry(entry)) {
+ struct page *page;
+
+ page = migration_entry_to_page(entry);
+
+ if (PageAnon(page))
+ rss[MM_ANONPAGES]--;
+ else
+ rss[MM_FILEPAGES]--;
+ }
if (unlikely(!free_swap_and_cache(entry)))
print_bad_pte(vma, addr, ptent, NULL);
}
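Both memory.c hunks add the same accounting: a migration entry is a non-present pte that still pins a resident page, so fork and unmap must adjust MM_ANONPAGES or MM_FILEPAGES for it rather than MM_SWAPENTS. A rough userspace model (simplified encoding, not the kernel's swp_entry_t layout):

#include <stdio.h>
#include <stdbool.h>

enum { MM_FILEPAGES, MM_ANONPAGES, MM_SWAPENTS, NR_MM_COUNTERS };

struct page { bool anon; };

/* Simplified "entry": either a real swap slot or a migration entry
 * that still references a page. */
struct entry {
        bool is_migration;
        struct page *page;   /* valid only for migration entries */
};

static void count_entry(const struct entry *e, int rss[NR_MM_COUNTERS], int delta)
{
        if (!e->is_migration)
                rss[MM_SWAPENTS] += delta;      /* real swap entry */
        else if (e->page->anon)
                rss[MM_ANONPAGES] += delta;     /* page still resident */
        else
                rss[MM_FILEPAGES] += delta;
}

int main(void)
{
        int rss[NR_MM_COUNTERS] = {0};
        struct page p = { .anon = true };
        struct entry mig = { .is_migration = true, .page = &p };

        count_entry(&mig, rss, +1);   /* copy_one_pte() side */
        count_entry(&mig, rss, -1);   /* zap side */
        printf("anon rss now %d\n", rss[MM_ANONPAGES]);
        return 0;
}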
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0027d8f..d2186ec 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5413,7 +5413,25 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
bool is_pageblock_removable_nolock(struct page *page)
{
- struct zone *zone = page_zone(page);
+ struct zone *zone;
+ unsigned long pfn;
+
+ /*
+ * We have to be careful here because we are iterating over memory
+ * sections which are not zone aware so we might end up outside of
+ * the zone but still within the section.
+ * We have to take care about the node as well. If the node is offline
+ * its NODE_DATA will be NULL - see page_zone.
+ */
+ if (!node_online(page_to_nid(page)))
+ return false;
+
+ zone = page_zone(page);
+ pfn = page_to_pfn(page);
+ if (zone->zone_start_pfn > pfn ||
+ zone->zone_start_pfn + zone->spanned_pages <= pfn)
+ return false;
+
return __count_immobile_pages(zone, page, 0);
}
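A hedged sketch of the bounds test added above, as a standalone predicate (the invented struct fields mirror zone_start_pfn and spanned_pages): a pfn inside a memory section is not necessarily inside the zone's span, so both ends must be checked before trusting per-zone state.

#include <stdbool.h>
#include <stdio.h>

struct zone_span {                      /* models zone_start_pfn/spanned_pages */
        unsigned long start_pfn;
        unsigned long spanned_pages;
};

static bool pfn_in_zone_span(const struct zone_span *z, unsigned long pfn)
{
        /* reject pfns before the zone starts or at/after its end */
        if (z->start_pfn > pfn || z->start_pfn + z->spanned_pages <= pfn)
                return false;
        return true;
}

int main(void)
{
        struct zone_span z = { .start_pfn = 0x1000, .spanned_pages = 0x100 };
        printf("%d %d %d\n",
               pfn_in_zone_span(&z, 0x0fff),    /* 0: below the span */
               pfn_in_zone_span(&z, 0x1080),    /* 1: inside */
               pfn_in_zone_span(&z, 0x1100));   /* 0: one past the end */
        return 0;
}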
diff --git a/mm/shmem.c b/mm/shmem.c
index feead19..269d049 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
/*
* Pagevec may contain swap entries, so shuffle up pages before releasing.
*/
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
{
int i, j;
@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
pvec->pages[j++] = page;
}
pvec->nr = j;
- pagevec_release(pvec);
+}
+
+/*
+ * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+ struct pagevec pvec;
+ pgoff_t indices[PAGEVEC_SIZE];
+ pgoff_t index = 0;
+
+ pagevec_init(&pvec, 0);
+ /*
+ * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+ */
+ while (!mapping_unevictable(mapping)) {
+ /*
+ * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
+ * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+ */
+ pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ PAGEVEC_SIZE, pvec.pages, indices);
+ if (!pvec.nr)
+ break;
+ index = indices[pvec.nr - 1] + 1;
+ shmem_deswap_pagevec(&pvec);
+ check_move_unevictable_pages(pvec.pages, pvec.nr);
+ pagevec_release(&pvec);
+ cond_resched();
+ }
}
/*
@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
}
unlock_page(page);
}
- shmem_pagevec_release(&pvec);
+ shmem_deswap_pagevec(&pvec);
+ pagevec_release(&pvec);
mem_cgroup_uncharge_end();
cond_resched();
index++;
@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
continue;
}
if (index == start && indices[0] > end) {
- shmem_pagevec_release(&pvec);
+ shmem_deswap_pagevec(&pvec);
+ pagevec_release(&pvec);
break;
}
mem_cgroup_uncharge_start();
@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
}
unlock_page(page);
}
- shmem_pagevec_release(&pvec);
+ shmem_deswap_pagevec(&pvec);
+ pagevec_release(&pvec);
mem_cgroup_uncharge_end();
index++;
}
@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
user_shm_unlock(inode->i_size, user);
info->flags &= ~VM_LOCKED;
mapping_clear_unevictable(file->f_mapping);
- /*
- * Ensure that a racing putback_lru_page() can see
- * the pages of this mapping are evictable when we
- * skip them due to !PageLRU during the scan.
- */
- smp_mb__after_clear_bit();
- scan_mapping_unevictable_pages(file->f_mapping);
}
retval = 0;
@@ -2445,6 +2470,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
return 0;
}
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
truncate_inode_pages_range(inode->i_mapping, lstart, lend);
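The shmem_unlock_mapping() loop added earlier in this file's hunks follows a common batched-scan shape. A userspace sketch of that shape (hypothetical fetch_batch(), not the shmem API): fetch up to a pagevec's worth of slots, resume after the last index seen, stop on an empty batch, and yield between batches so the scan stays preemptible.

#include <stdio.h>

#define BATCH 16

/* Hypothetical batch lookup: fills indices[] with up to 'max' slot
 * numbers at or after 'start', returns how many were found. */
static int fetch_batch(unsigned long start, unsigned long end,
                       unsigned long *indices, int max)
{
        int n = 0;
        while (start < end && n < max)
                indices[n++] = start++;
        return n;
}

int main(void)
{
        unsigned long indices[BATCH];
        unsigned long index = 0;
        int n;

        while ((n = fetch_batch(index, 40, indices, BATCH)) != 0) {
                /* ... process the n slots in this batch ... */
                index = indices[n - 1] + 1;   /* resume after the last slot seen */
                printf("processed %d slots, next index %lu\n", n, index);
                /* cond_resched() would go here in the kernel */
        }
        return 0;
}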
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2880396..c52b235 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -26,7 +26,6 @@
#include <linux/buffer_head.h> /* for try_to_release_page(),
buffer_heads_over_limit */
#include <linux/mm_inline.h>
-#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
@@ -661,7 +660,7 @@ redo:
* When racing with an mlock or AS_UNEVICTABLE clearing
* (page is unlocked) make sure that if the other thread
* does not observe our setting of PG_lru and fails
- * isolation/check_move_unevictable_page,
+ * isolation/check_move_unevictable_pages,
* we see PG_mlocked/AS_UNEVICTABLE cleared below and move
* the page back to the evictable list.
*
@@ -3499,100 +3498,61 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
return 1;
}
+#ifdef CONFIG_SHMEM
/**
- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
- * @page: page to check evictability and move to appropriate lru list
- * @zone: zone page is in
+ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
+ * @pages: array of pages to check
+ * @nr_pages: number of pages to check
*
- * Checks a page for evictability and moves the page to the appropriate
- * zone lru list.
+ * Checks pages for evictability and moves them to the appropriate lru list.
*
- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
- * have PageUnevictable set.
+ * This function is only used for SysV IPC SHM_UNLOCK.
*/
-static void check_move_unevictable_page(struct page *page, struct zone *zone)
+void check_move_unevictable_pages(struct page **pages, int nr_pages)
{
struct lruvec *lruvec;
+ struct zone *zone = NULL;
+ int pgscanned = 0;
+ int pgrescued = 0;
+ int i;
- VM_BUG_ON(PageActive(page));
-retry:
- ClearPageUnevictable(page);
- if (page_evictable(page, NULL)) {
- enum lru_list l = page_lru_base_type(page);
-
- __dec_zone_state(zone, NR_UNEVICTABLE);
- lruvec = mem_cgroup_lru_move_lists(zone, page,
- LRU_UNEVICTABLE, l);
- list_move(&page->lru, &lruvec->lists[l]);
- __inc_zone_state(zone, NR_INACTIVE_ANON + l);
- __count_vm_event(UNEVICTABLE_PGRESCUED);
- } else {
- /*
- * rotate unevictable list
- */
- SetPageUnevictable(page);
- lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
- LRU_UNEVICTABLE);
- list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
- if (page_evictable(page, NULL))
- goto retry;
- }
-}
-
-/**
- * scan_mapping_unevictable_pages - scan an address space for evictable pages
- * @mapping: struct address_space to scan for evictable pages
- *
- * Scan all pages in mapping. Check unevictable pages for
- * evictability and move them to the appropriate zone lru list.
- */
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
- pgoff_t next = 0;
- pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
- struct zone *zone;
- struct pagevec pvec;
-
- if (mapping->nrpages == 0)
- return;
-
- pagevec_init(&pvec, 0);
- while (next < end &&
- pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
- int i;
- int pg_scanned = 0;
-
- zone = NULL;
-
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
- pgoff_t page_index = page->index;
- struct zone *pagezone = page_zone(page);
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pages[i];
+ struct zone *pagezone;
- pg_scanned++;
- if (page_index > next)
- next = page_index;
- next++;
+ pgscanned++;
+ pagezone = page_zone(page);
+ if (pagezone != zone) {
+ if (zone)
+ spin_unlock_irq(&zone->lru_lock);
+ zone = pagezone;
+ spin_lock_irq(&zone->lru_lock);
+ }
- if (pagezone != zone) {
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- zone = pagezone;
- spin_lock_irq(&zone->lru_lock);
- }
+ if (!PageLRU(page) || !PageUnevictable(page))
+ continue;
- if (PageLRU(page) && PageUnevictable(page))
- check_move_unevictable_page(page, zone);
+ if (page_evictable(page, NULL)) {
+ enum lru_list lru = page_lru_base_type(page);
+
+ VM_BUG_ON(PageActive(page));
+ ClearPageUnevictable(page);
+ __dec_zone_state(zone, NR_UNEVICTABLE);
+ lruvec = mem_cgroup_lru_move_lists(zone, page,
+ LRU_UNEVICTABLE, lru);
+ list_move(&page->lru, &lruvec->lists[lru]);
+ __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+ pgrescued++;
}
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- pagevec_release(&pvec);
-
- count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
}
+ if (zone) {
+ __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+ __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
+ spin_unlock_irq(&zone->lru_lock);
+ }
}
+#endif /* CONFIG_SHMEM */
static void warn_scan_unevictable_pages(void)
{
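The new check_move_unevictable_pages() above batches lock traffic: it keeps one zone's lru_lock held across consecutive pages from that zone and only drops and retakes the lock when the zone changes. A userspace analogue of that lock-batching idiom (pthread mutexes standing in for zone->lru_lock):

#include <pthread.h>
#include <stdio.h>

struct zone { pthread_mutex_t lru_lock; int id; };

static void process_pages(struct zone **page_zone_of, int nr)
{
        struct zone *zone = NULL;   /* currently locked zone, if any */
        int i;

        for (i = 0; i < nr; i++) {
                struct zone *pagezone = page_zone_of[i];

                /* re-lock only when the zone actually changes */
                if (pagezone != zone) {
                        if (zone)
                                pthread_mutex_unlock(&zone->lru_lock);
                        zone = pagezone;
                        pthread_mutex_lock(&zone->lru_lock);
                }
                printf("page %d handled under zone %d lock\n", i, zone->id);
        }
        if (zone)
                pthread_mutex_unlock(&zone->lru_lock);
}

int main(void)
{
        struct zone a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct zone b = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct zone *pages[] = { &a, &a, &b, &b, &a };

        process_pages(pages, 5);
        return 0;
}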