path: root/mm
author	Jeff Garzik <jgarzik@pretzel.yyz.us>	2005-05-27 22:07:40 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-05-27 22:07:40 -0400
commit	ff0e0ea2f5d36fa90fc2c57fd019102b0a0cfabf (patch)
tree	963cdd52a4032cd4827896c4e813cfbf6dd7b3e6 /mm
parent	43f66a6ce8da299344cf1bc2ac2311889cc88555 (diff)
parent	1f15d694522af9cd7492695f11dd2dc77b6cf098 (diff)
Automatic merge of /spare/repo/netdev-2.6 branch we18
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	2
-rw-r--r--	mm/memory.c	17
-rw-r--r--	mm/mmap.c	63
-rw-r--r--	mm/mremap.c	7
-rw-r--r--	mm/nommu.c	3
-rw-r--r--	mm/rmap.c	8
-rw-r--r--	mm/swapfile.c	2
-rw-r--r--	mm/vmalloc.c	33
8 files changed, 79 insertions(+), 56 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 47263ac..1d33fec 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1004,7 +1004,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
if (pos < size) {
retval = generic_file_direct_IO(READ, iocb,
iov, pos, nr_segs);
- if (retval >= 0 && !is_sync_kiocb(iocb))
+ if (retval > 0 && !is_sync_kiocb(iocb))
retval = -EIOCBQUEUED;
if (retval > 0)
*ppos = pos + retval;
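
The filemap.c change above tightens when an async direct-I/O read is reported as queued: only a read that actually queued bytes (retval > 0, not >= 0) becomes -EIOCBQUEUED, so a 0-byte result now completes synchronously. A minimal userspace sketch of the post-patch convention; the EIOCBQUEUED value and the helper name are assumptions, not the kernel API:

#include <stdio.h>

#define EIOCBQUEUED_DEMO 529   /* kernel-internal errno; value assumed */

static long finish_aio_read(long retval, int is_sync, long *ppos, long pos)
{
    /* Post-patch rule: only a read that actually queued bytes is
     * reported as in flight; a 0-byte result completes synchronously. */
    if (retval > 0 && !is_sync)
        retval = -EIOCBQUEUED_DEMO;
    if (retval > 0)
        *ppos = pos + retval;
    return retval;
}

int main(void)
{
    long pos = 0;
    printf("%ld\n", finish_aio_read(0, 0, &pos, 0));     /* 0, not -EIOCBQUEUED */
    printf("%ld\n", finish_aio_read(4096, 0, &pos, 0));  /* -529: queued */
    return 0;
}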
diff --git a/mm/memory.c b/mm/memory.c
index 6bad4c4..d209f74 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1701,12 +1701,13 @@ static int do_swap_page(struct mm_struct * mm,
spin_lock(&mm->page_table_lock);
page_table = pte_offset_map(pmd, address);
if (unlikely(!pte_same(*page_table, orig_pte))) {
- pte_unmap(page_table);
- spin_unlock(&mm->page_table_lock);
- unlock_page(page);
- page_cache_release(page);
ret = VM_FAULT_MINOR;
- goto out;
+ goto out_nomap;
+ }
+
+ if (unlikely(!PageUptodate(page))) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_nomap;
}
/* The page isn't present yet, go ahead with the fault. */
@@ -1741,6 +1742,12 @@ static int do_swap_page(struct mm_struct * mm,
spin_unlock(&mm->page_table_lock);
out:
return ret;
+out_nomap:
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ unlock_page(page);
+ page_cache_release(page);
+ goto out;
}
/*
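
The memory.c hunks consolidate two error exits of do_swap_page() behind one out_nomap unwind label and add a PageUptodate() check, so a failed swap-in raises SIGBUS instead of mapping a stale page. A minimal sketch of the same goto-unwind idiom, assuming pthreads and invented names:

#include <pthread.h>

struct resource { pthread_mutex_t lock; int mapped; };

/* r->lock must be initialized by the caller; pte_changed and uptodate
 * stand in for the two race/IO conditions checked in do_swap_page(). */
static int fault_like_operation(struct resource *r, int pte_changed, int uptodate)
{
    int ret = 0;

    pthread_mutex_lock(&r->lock);
    r->mapped = 1;                      /* pte_offset_map() analogue */

    if (pte_changed) {                  /* lost a race with another fault */
        ret = 1;                        /* VM_FAULT_MINOR analogue */
        goto out_nomap;
    }
    if (!uptodate) {                    /* swap-in I/O failed */
        ret = -1;                       /* VM_FAULT_SIGBUS analogue */
        goto out_nomap;
    }

    /* ... success path would install the new PTE here ... */
    r->mapped = 0;
    pthread_mutex_unlock(&r->lock);
out:
    return ret;
out_nomap:                              /* single unwind for both errors */
    r->mapped = 0;                      /* pte_unmap() analogue */
    pthread_mutex_unlock(&r->lock);
    goto out;
}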
diff --git a/mm/mmap.c b/mm/mmap.c
index 01f9793..de54acd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1244,7 +1244,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = mm->free_area_cache;
/* make sure it can fit in the remaining address space */
- if (addr >= len) {
+ if (addr > len) {
vma = find_vma(mm, addr-len);
if (!vma || addr <= vma->vm_start)
/* remember the address as a hint for next time */
@@ -1266,7 +1266,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* try just below the current vma->vm_start */
addr = vma->vm_start-len;
- } while (len <= vma->vm_start);
+ } while (len < vma->vm_start);
/*
* A failed mmap() very likely causes application failure,
@@ -1302,37 +1302,40 @@ unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
- if (flags & MAP_FIXED) {
- unsigned long ret;
+ unsigned long ret;
- if (addr > TASK_SIZE - len)
- return -ENOMEM;
- if (addr & ~PAGE_MASK)
- return -EINVAL;
- if (file && is_file_hugepages(file)) {
- /*
- * Check if the given range is hugepage aligned, and
- * can be made suitable for hugepages.
- */
- ret = prepare_hugepage_range(addr, len);
- } else {
- /*
- * Ensure that a normal request is not falling in a
- * reserved hugepage range. For some archs like IA-64,
- * there is a separate region for hugepages.
- */
- ret = is_hugepage_only_range(current->mm, addr, len);
- }
- if (ret)
- return -EINVAL;
- return addr;
- }
+ if (!(flags & MAP_FIXED)) {
+ unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
- if (file && file->f_op && file->f_op->get_unmapped_area)
- return file->f_op->get_unmapped_area(file, addr, len,
- pgoff, flags);
+ get_area = current->mm->get_unmapped_area;
+ if (file && file->f_op && file->f_op->get_unmapped_area)
+ get_area = file->f_op->get_unmapped_area;
+ addr = get_area(file, addr, len, pgoff, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ }
- return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+ if (addr > TASK_SIZE - len)
+ return -ENOMEM;
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (file && is_file_hugepages(file)) {
+ /*
+ * Check if the given range is hugepage aligned, and
+ * can be made suitable for hugepages.
+ */
+ ret = prepare_hugepage_range(addr, len);
+ } else {
+ /*
+ * Ensure that a normal request is not falling in a
+ * reserved hugepage range. For some archs like IA-64,
+ * there is a separate region for hugepages.
+ */
+ ret = is_hugepage_only_range(current->mm, addr, len);
+ }
+ if (ret)
+ return -EINVAL;
+ return addr;
}
EXPORT_SYMBOL(get_unmapped_area);
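
The rewrite above routes both MAP_FIXED and non-fixed requests through the same bounds and alignment checks, picking the allocator through a function pointer first. A sketch of that dispatch-then-validate shape; TASK_SIZE_DEMO, PAGE_MASK_DEMO, and the hook type are stand-ins, not kernel definitions:

#include <errno.h>

typedef unsigned long (*get_area_fn)(void *file, unsigned long addr,
                                     unsigned long len);

#define TASK_SIZE_DEMO   (1UL << 47)              /* stand-in limit */
#define PAGE_MASK_DEMO   (~((1UL << 12) - 1))     /* 4 KiB pages assumed */
#define IS_ERR_VALUE_DEMO(x) ((unsigned long)(x) >= (unsigned long)-4095)

static unsigned long default_get_area(void *file, unsigned long addr,
                                      unsigned long len)
{
    (void)file; (void)len;
    return addr ? addr : (1UL << 30);             /* placeholder policy */
}

unsigned long get_unmapped_area_demo(void *file, get_area_fn file_hook,
                                     unsigned long addr, unsigned long len,
                                     int map_fixed)
{
    if (!map_fixed) {
        /* Prefer the file's hook, else the default, mirroring
         * file->f_op->get_unmapped_area vs. mm->get_unmapped_area. */
        get_area_fn get_area = file_hook ? file_hook : default_get_area;

        addr = get_area(file, addr, len);
        if (IS_ERR_VALUE_DEMO(addr))
            return addr;
    }
    /* Both paths now share the checks the old code applied
     * only to MAP_FIXED requests. */
    if (addr > TASK_SIZE_DEMO - len)
        return -ENOMEM;
    if (addr & ~PAGE_MASK_DEMO)
        return -EINVAL;
    return addr;
}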
diff --git a/mm/mremap.c b/mm/mremap.c
index 0dd7ace..ec7238a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -224,6 +224,12 @@ static unsigned long move_vma(struct vm_area_struct *vma,
split = 1;
}
+ /*
+ * if we failed to move page tables we still do total_vm increment
+ * since do_munmap() will decrement it by old_len == new_len
+ */
+ mm->total_vm += new_len >> PAGE_SHIFT;
+
if (do_munmap(mm, old_addr, old_len) < 0) {
/* OOM: unable to split vma, just get accounts right */
vm_unacct_memory(excess >> PAGE_SHIFT);
@@ -237,7 +243,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
vma->vm_next->vm_flags |= VM_ACCOUNT;
}
- mm->total_vm += new_len >> PAGE_SHIFT;
__vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
mm->locked_vm += new_len >> PAGE_SHIFT;
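
The mremap.c hunk moves the total_vm increment ahead of do_munmap(), which unconditionally subtracts old_len's pages; adding first keeps the counter correct even when the move fails. A toy model of that ordering, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT_DEMO 12

static unsigned long total_vm;

static int do_munmap_demo(unsigned long len)
{
    total_vm -= len >> PAGE_SHIFT_DEMO;   /* what do_munmap() always does */
    return 0;                             /* or < 0 on OOM-split failure */
}

int main(void)
{
    unsigned long old_len = 1UL << 20, new_len = 1UL << 21;
    total_vm = old_len >> PAGE_SHIFT_DEMO;

    total_vm += new_len >> PAGE_SHIFT_DEMO;   /* moved before do_munmap() */
    if (do_munmap_demo(old_len) < 0) {
        /* even on this path total_vm already reflects the new mapping */
    }
    printf("total_vm = %lu pages\n", total_vm);  /* new_len's page count */
    return 0;
}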
diff --git a/mm/nommu.c b/mm/nommu.c
index b293ec1..c53e9c8 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -150,7 +150,8 @@ void vfree(void *addr)
kfree(addr);
}
-void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
+ pgprot_t prot)
{
/*
* kmalloc doesn't like __GFP_HIGHMEM for some reason
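
The nommu.c hunk only adds a __nocast annotation to gfp_mask, which the sparse static checker uses to flag implicit type casts; an ordinary compiler sees nothing. A sketch of how such an annotation is typically wired up (the exact compiler.h of this era may differ):

/* Visible only to the sparse checker; empty otherwise. */
#ifdef __CHECKER__
# define __nocast __attribute__((nocast))
#else
# define __nocast
#endif

typedef unsigned long pgprot_demo_t;   /* stand-in for pgprot_t */

void *__vmalloc_demo(unsigned long size, unsigned int __nocast gfp_mask,
                     pgprot_demo_t prot);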
diff --git a/mm/rmap.c b/mm/rmap.c
index 378de23..9827409 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -586,7 +586,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
dec_mm_counter(mm, anon_rss);
}
- inc_mm_counter(mm, rss);
+ dec_mm_counter(mm, rss);
page_remove_rmap(page);
page_cache_release(page);
@@ -626,7 +626,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
- pte_t *pte;
+ pte_t *pte, *original_pte;
pte_t pteval;
struct page *page;
unsigned long address;
@@ -658,7 +658,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
if (!pmd_present(*pmd))
goto out_unlock;
- for (pte = pte_offset_map(pmd, address);
+ for (original_pte = pte = pte_offset_map(pmd, address);
address < end; pte++, address += PAGE_SIZE) {
if (!pte_present(*pte))
@@ -694,7 +694,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
(*mapcount)--;
}
- pte_unmap(pte);
+ pte_unmap(original_pte);
out_unlock:
spin_unlock(&mm->page_table_lock);
}
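
The rmap.c hunks fix the rss counter direction in try_to_unmap_one() (unmapping must decrement, not increment) and make try_to_unmap_cluster() unmap the PTE pointer it originally mapped rather than the loop-advanced cursor. The second fix is the generic save-the-cursor idiom, sketched here as a userspace analogue:

#include <stdlib.h>

/* pte_offset_map()/pte_unmap() analogue: release the pointer that was
 * mapped, not the one the loop advanced. */
static void walk_and_release(size_t n)
{
    int *original = malloc(n * sizeof *original);   /* "map" the table */
    if (!original)
        return;
    for (int *p = original; p < original + n; p++)
        *p = 0;                                     /* visit each entry */
    /* Pre-fix bug shape: free(p) here would pass the advanced cursor. */
    free(original);                                 /* "unmap" what was mapped */
}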
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a60e007..da48405 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -79,7 +79,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
WARN_ON(page_count(page) <= 1);
bdi = bdev->bd_inode->i_mapping->backing_dev_info;
- bdi->unplug_io_fn(bdi, page);
+ blk_run_backing_dev(bdi, page);
}
up_read(&swap_unplug_sem);
}
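
The swapfile.c hunk replaces the direct bdi->unplug_io_fn() call with the blk_run_backing_dev() helper. As best as the helper of this era can be reconstructed, it simply guards the hook before calling it; the snippet below is that assumed shape, not a verbatim quote:

struct page;                            /* opaque here */

struct backing_dev_info {
    void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
};

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
                                       struct page *page)
{
    if (bdi && bdi->unplug_io_fn)       /* guard a possibly-unset hook */
        bdi->unplug_io_fn(bdi, page);
}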
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2bd83e5..8ff16a1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -248,31 +248,20 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
-/**
- * remove_vm_area - find and remove a contiguous kernel virtual area
- *
- * @addr: base address
- *
- * Search for the kernel VM area starting at @addr, and remove it.
- * This function returns the found VM area, but using it is NOT safe
- * on SMP machines.
- */
-struct vm_struct *remove_vm_area(void *addr)
+/* Caller must hold vmlist_lock */
+struct vm_struct *__remove_vm_area(void *addr)
{
struct vm_struct **p, *tmp;
- write_lock(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
if (tmp->addr == addr)
goto found;
}
- write_unlock(&vmlist_lock);
return NULL;
found:
unmap_vm_area(tmp);
*p = tmp->next;
- write_unlock(&vmlist_lock);
/*
* Remove the guard page.
@@ -281,6 +270,24 @@ found:
return tmp;
}
+/**
+ * remove_vm_area - find and remove a contiguous kernel virtual area
+ *
+ * @addr: base address
+ *
+ * Search for the kernel VM area starting at @addr, and remove it.
+ * This function returns the found VM area, but using it is NOT safe
+ * on SMP machines, except for its size or flags.
+ */
+struct vm_struct *remove_vm_area(void *addr)
+{
+ struct vm_struct *v;
+ write_lock(&vmlist_lock);
+ v = __remove_vm_area(addr);
+ write_unlock(&vmlist_lock);
+ return v;
+}
+
void __vunmap(void *addr, int deallocate_pages)
{
struct vm_struct *area;
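
The vmalloc.c hunks split remove_vm_area() into a lock-free __remove_vm_area() (caller holds vmlist_lock) plus a locking wrapper, a standard kernel naming idiom that lets other lock-holding paths reuse the search-and-unlink. A userspace sketch of the same split, using a pthread rwlock and invented names:

#include <pthread.h>

static pthread_rwlock_t vmlist_lock_demo = PTHREAD_RWLOCK_INITIALIZER;

struct vm_area_demo { void *addr; struct vm_area_demo *next; };
static struct vm_area_demo *vmlist_demo;

/* Caller must hold vmlist_lock_demo for writing. */
static struct vm_area_demo *__remove_demo(void *addr)
{
    for (struct vm_area_demo **p = &vmlist_demo; *p; p = &(*p)->next) {
        if ((*p)->addr == addr) {
            struct vm_area_demo *tmp = *p;
            *p = tmp->next;             /* unlink while locked */
            return tmp;
        }
    }
    return NULL;
}

/* Locking wrapper keeps the old entry point's contract. */
struct vm_area_demo *remove_demo(void *addr)
{
    struct vm_area_demo *v;

    pthread_rwlock_wrlock(&vmlist_lock_demo);
    v = __remove_demo(addr);
    pthread_rwlock_unlock(&vmlist_lock_demo);
    return v;
}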