Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig            |  1
-rw-r--r--   mm/filemap.c          |  1
-rw-r--r--   mm/fremap.c           |  2
-rw-r--r--   mm/hugetlb.c          |  2
-rw-r--r--   mm/memory.c           | 23
-rw-r--r--   mm/page-writeback.c   |  4
6 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index e24d348..a7609cb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -137,6 +137,7 @@ config SPLIT_PTLOCK_CPUS
int
default "4096" if ARM && !CPU_CACHE_VIPT
default "4096" if PARISC && !PA20
+ default "4096" if XEN
default "4"
#
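Note on the mm/Kconfig hunk: SPLIT_PTLOCK_CPUS is the CPU-count threshold at or above which page-table locks are split per page-table page. Defaulting it to 4096 under XEN keeps the single per-mm lock in practice, mirroring the existing ARM and PARISC workarounds. Schematically (hypothetical build values, not actual kernel source):

#include <stdio.h>

#define NR_CPUS 64                      /* hypothetical build value   */
#define CONFIG_SPLIT_PTLOCK_CPUS 4096   /* the XEN default added here */

int main(void)
{
	/* Schematic of the kernel's compile-time test: per-page split
	 * page-table locks are only enabled when the CPU count reaches
	 * the threshold, so 4096 disables them for any realistic box. */
	printf("split ptlocks enabled: %d\n",
	       NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS);
	return 0;
}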
diff --git a/mm/filemap.c b/mm/filemap.c
index 90b657b..15c8413 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1388,6 +1388,7 @@ retry_find:
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (unlikely(vmf->pgoff >= size)) {
unlock_page(page);
+ page_cache_release(page);
goto outside_data_content;
}
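Note on the mm/filemap.c hunk: the fault path holds a reference on the page from the earlier lookup, and the error path unlocked it without dropping that reference, leaking the page. A minimal standalone sketch of the refcount rule the added page_cache_release() restores (illustrative stand-ins, not the kernel API):

#include <assert.h>

struct page_stub { int refcount; };

static void get_page_stub(struct page_stub *p) { p->refcount++; }
static void put_page_stub(struct page_stub *p) { p->refcount--; }

int main(void)
{
	struct page_stub page = { .refcount = 1 };

	/* The fault path's lookup takes a reference, analogous to the
	 * page-cache lookup in mm/filemap.c. */
	get_page_stub(&page);

	/* Error path: the size check fails. Unlocking alone is not
	 * enough; the reference must also be dropped, which is what the
	 * added page_cache_release() does. */
	put_page_stub(&page);

	assert(page.refcount == 1);  /* balanced, nothing leaked */
	return 0;
}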
diff --git a/mm/fremap.c b/mm/fremap.c
index c395b1a..95bcb56 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -160,7 +160,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
goto out;
- if (!vma->vm_flags & VM_CAN_NONLINEAR)
+ if (!(vma->vm_flags & VM_CAN_NONLINEAR))
goto out;
if (end <= start || start < vma->vm_start || end > vma->vm_end)
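Note on the mm/fremap.c hunk: in C, ! binds tighter than &, so the old test parsed as (!vma->vm_flags) & VM_CAN_NONLINEAR. Since ! yields only 0 or 1 and the flag sits in a high bit, the expression was always zero and the guard could never fire. A small demonstration, compilable on its own (the bit value is illustrative; modern compilers flag the buggy form with -Wparentheses):

#include <stdio.h>

#define VM_CAN_NONLINEAR 0x08000000UL   /* illustrative bit value */

int main(void)
{
	unsigned long vm_flags = VM_CAN_NONLINEAR;  /* the flag is set */

	/* Buggy form: parses as (!vm_flags) & VM_CAN_NONLINEAR, which is
	 * always 0, so the check never triggers. */
	printf("buggy: %lu\n", !vm_flags & VM_CAN_NONLINEAR);

	/* Fixed form: tests the intended bit (0 here, flag is present). */
	printf("fixed: %d\n", !(vm_flags & VM_CAN_NONLINEAR));
	return 0;
}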
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 84c795e..eab8c42 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr)
might_sleep();
for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
cond_resched();
- clear_user_highpage(page + i, addr);
+ clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
}
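Note on the mm/hugetlb.c hunk: clear_user_highpage() takes the user virtual address of the page being cleared so that architectures with virtually indexed caches can handle aliasing correctly; the old loop passed the huge page's base address for every subpage. Each subpage i actually lives at addr + i * PAGE_SIZE, as this sketch shows (constants and the address are illustrative):

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define HPAGE_SIZE (2UL * 1024 * 1024)  /* illustrative 2 MB huge page */

int main(void)
{
	unsigned long addr = 0x40000000UL;  /* hypothetical user address */
	unsigned long i;

	/* Each PAGE_SIZE subpage of the huge page is mapped at a distinct
	 * user address; that per-subpage address is what the fixed call
	 * now hands to clear_user_highpage(). */
	for (i = 0; i < HPAGE_SIZE / PAGE_SIZE; i++) {
		unsigned long sub = addr + i * PAGE_SIZE;

		if (i < 3 || i == HPAGE_SIZE / PAGE_SIZE - 1)
			printf("subpage %4lu -> %#lx\n", i, sub);
	}
	return 0;
}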
diff --git a/mm/memory.c b/mm/memory.c
index ca8cac1..f82b359b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1639,6 +1639,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *old_page, *new_page;
pte_t entry;
int reuse = 0, ret = 0;
+ int page_mkwrite = 0;
struct page *dirty_page = NULL;
old_page = vm_normal_page(vma, address, orig_pte);
@@ -1687,6 +1688,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_release(old_page);
if (!pte_same(*page_table, orig_pte))
goto unlock;
+
+ page_mkwrite = 1;
}
dirty_page = old_page;
get_page(dirty_page);
@@ -1774,7 +1777,7 @@ unlock:
* do_no_page is protected similarly.
*/
wait_on_page_locked(dirty_page);
- set_page_dirty_balance(dirty_page);
+ set_page_dirty_balance(dirty_page, page_mkwrite);
put_page(dirty_page);
}
return ret;
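Note on the do_wp_page() hunks: the new page_mkwrite flag records that the filesystem's ->page_mkwrite() handler ran for this fault. Such handlers typically dirty the page themselves, so the later set_page_dirty() returns 0, and without the flag set_page_dirty_balance() would skip throttling entirely, letting a task dirty large amounts of memory without ever entering balance_dirty_pages(). A minimal sketch of the decision (stub names, not the kernel API):

#include <stdio.h>

/* Stub: returns 1 only when the page transitions clean to dirty,
 * mimicking set_page_dirty()'s return value. */
static int set_page_dirty_stub(int already_dirty)
{
	return !already_dirty;
}

static void balance_decision(int already_dirty, int page_mkwrite)
{
	if (set_page_dirty_stub(already_dirty) || page_mkwrite)
		printf("balance dirty pages\n");
	else
		printf("skip balancing\n");
}

int main(void)
{
	/* ->page_mkwrite() already dirtied the page: the old code skipped
	 * balancing here, the fixed code still throttles. */
	balance_decision(1, 0);  /* old behaviour: skip */
	balance_decision(1, 1);  /* fixed: balance      */
	return 0;
}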
@@ -2307,13 +2310,14 @@ oom:
* do not need to flush old virtual caches or the TLB.
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
+ * but allow concurrent faults), and pte neither mapped nor locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pte_t *page_table, pmd_t *pmd,
+ unsigned long address, pmd_t *pmd,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
+ pte_t *page_table;
spinlock_t *ptl;
struct page *page;
pte_t entry;
@@ -2321,13 +2325,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *dirty_page = NULL;
struct vm_fault vmf;
int ret;
+ int page_mkwrite = 0;
vmf.virtual_address = (void __user *)(address & PAGE_MASK);
vmf.pgoff = pgoff;
vmf.flags = flags;
vmf.page = NULL;
- pte_unmap(page_table);
BUG_ON(vma->vm_flags & VM_PFNMAP);
if (likely(vma->vm_ops->fault)) {
@@ -2398,6 +2402,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
anon = 1; /* no anon but release vmf.page */
goto out;
}
+ page_mkwrite = 1;
}
}
@@ -2453,7 +2458,7 @@ out_unlocked:
if (anon)
page_cache_release(vmf.page);
else if (dirty_page) {
- set_page_dirty_balance(dirty_page);
+ set_page_dirty_balance(dirty_page, page_mkwrite);
put_page(dirty_page);
}
@@ -2468,8 +2473,8 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
{
pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
- return __do_fault(mm, vma, address, page_table, pmd, pgoff,
- flags, orig_pte);
+ pte_unmap(page_table);
+ return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
@@ -2552,9 +2557,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}
pgoff = pte_to_pgoff(orig_pte);
-
- return __do_fault(mm, vma, address, page_table, pmd, pgoff,
- flags, orig_pte);
+ return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
/*
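Note on the __do_fault() refactor: the pte is now unmapped by the callers rather than inside __do_fault() itself, so the function enters with the pte neither mapped nor locked, as the updated comment states. do_linear_fault() gains an explicit pte_unmap() before the call; do_nonlinear_fault() presumably reaches this point with the pte already unmapped earlier in its path, which is why passing page_table down and unmapping it again inside __do_fault() was a problem. A userspace sketch of the double-unmap hazard the new calling convention rules out (illustrative stubs, not the kernel API):

#include <assert.h>

/* Stubs for a kmap-style map count; a second unmap of the same pte
 * would underflow it, which is the hazard the refactor avoids. */
static int map_count;

static void pte_map_stub(void)   { map_count++; }
static void pte_unmap_stub(void) { assert(map_count > 0); map_count--; }

/* After the change, __do_fault-like code assumes the caller has already
 * unmapped the pte and never unmaps it itself. */
static void do_fault_stub(void)
{
	/* no pte_unmap_stub() here any more */
}

int main(void)
{
	pte_map_stub();
	pte_unmap_stub();  /* caller unmaps exactly once, before the call */
	do_fault_stub();
	assert(map_count == 0);
	return 0;
}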
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 63512a9..4472036 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -274,9 +274,9 @@ static void balance_dirty_pages(struct address_space *mapping)
pdflush_operation(background_writeout, 0);
}
-void set_page_dirty_balance(struct page *page)
+void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
- if (set_page_dirty(page)) {
+ if (set_page_dirty(page) || page_mkwrite) {
struct address_space *mapping = page_mapping(page);
if (mapping)