author     Christoph Lameter <clameter@engr.sgi.com>   2006-01-06 00:10:49 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>       2006-01-06 08:33:23 -0800
commit     6bda666a03f063968833760c5bb5c13062ab9291 (patch)
tree       8ecc0b672c059aa296f80935cda33f3e59970832
parent     21abb1478a87e26f5fa71dbcb7cf4264272c2248 (diff)
[PATCH] hugepages: fold find_or_alloc_huge_page() into hugetlb_no_page()
The number of parameters for find_or_alloc_huge_page() increases significantly
after policy support is added to huge pages. Simplify the code by folding
find_or_alloc_huge_page() into hugetlb_no_page().

Adam Litke objected to this piece in an earlier patch, but I think this is a
good simplification. The diffstat shows that we can get rid of almost half of
the lines of find_or_alloc_huge_page(). If we can find no consensus, then let's
simply drop this patch.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@muc.de>
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Cc: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--   mm/hugetlb.c   66
1 file changed, 24 insertions, 42 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eb40556..f4c43d7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -368,43 +368,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	flush_tlb_range(vma, start, end);
 }
 
-static struct page *find_or_alloc_huge_page(struct vm_area_struct *vma,
-			unsigned long addr, struct address_space *mapping,
-			unsigned long idx, int shared)
-{
-	struct page *page;
-	int err;
-
-retry:
-	page = find_lock_page(mapping, idx);
-	if (page)
-		goto out;
-
-	if (hugetlb_get_quota(mapping))
-		goto out;
-	page = alloc_huge_page(vma, addr);
-	if (!page) {
-		hugetlb_put_quota(mapping);
-		goto out;
-	}
-
-	if (shared) {
-		err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
-		if (err) {
-			put_page(page);
-			hugetlb_put_quota(mapping);
-			if (err == -EEXIST)
-				goto retry;
-			page = NULL;
-		}
-	} else {
-		/* Caller expects a locked page */
-		lock_page(page);
-	}
-out:
-	return page;
-}
-
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pte_t *ptep, pte_t pte)
 {
@@ -471,12 +434,31 @@ int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * Use page lock to guard against racing truncation
 	 * before we get page_table_lock.
 	 */
-	page = find_or_alloc_huge_page(vma, address, mapping, idx,
-			vma->vm_flags & VM_SHARED);
-	if (!page)
-		goto out;
+retry:
+	page = find_lock_page(mapping, idx);
+	if (!page) {
+		if (hugetlb_get_quota(mapping))
+			goto out;
+		page = alloc_huge_page(vma, address);
+		if (!page) {
+			hugetlb_put_quota(mapping);
+			goto out;
+		}
 
-	BUG_ON(!PageLocked(page));
+		if (vma->vm_flags & VM_SHARED) {
+			int err;
+
+			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+			if (err) {
+				put_page(page);
+				hugetlb_put_quota(mapping);
+				if (err == -EEXIST)
+					goto retry;
+				goto out;
+			}
+		} else
+			lock_page(page);
+	}
 
 	spin_lock(&mm->page_table_lock);
 	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
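The inlined path above follows the usual page-cache find-or-insert idiom: look the
index up, allocate on a miss, attempt the insert, and if add_to_page_cache() returns
-EEXIST because another thread won the race, drop the local copy and retry the lookup.
Below is a minimal userspace sketch of that retry pattern, offered only as an
illustration; the toy table, cache_find(), cache_insert() and find_or_alloc() are
invented stand-ins for the kernel's page cache and alloc_huge_page(), not kernel APIs.

/*
 * Userspace model (not kernel code) of the find-or-insert-with-retry pattern.
 * The "cache" is a mutex-protected table of pointers indexed by idx.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_SLOTS 64

static void *cache[CACHE_SLOTS];
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return the cached object for idx, or NULL on a miss. */
static void *cache_find(unsigned long idx)
{
	void *obj;

	pthread_mutex_lock(&cache_lock);
	obj = cache[idx % CACHE_SLOTS];
	pthread_mutex_unlock(&cache_lock);
	return obj;
}

/* Insert obj at idx; return -1 if another thread already inserted one. */
static int cache_insert(unsigned long idx, void *obj)
{
	int err = 0;

	pthread_mutex_lock(&cache_lock);
	if (cache[idx % CACHE_SLOTS])
		err = -1;			/* analogue of -EEXIST */
	else
		cache[idx % CACHE_SLOTS] = obj;
	pthread_mutex_unlock(&cache_lock);
	return err;
}

/* Find-or-allocate with the same retry structure as the folded code. */
static void *find_or_alloc(unsigned long idx)
{
	void *obj;

retry:
	obj = cache_find(idx);
	if (obj)
		return obj;			/* fast path: already present */

	obj = malloc(4096);			/* stand-in for alloc_huge_page() */
	if (!obj)
		return NULL;
	memset(obj, 0, 4096);

	if (cache_insert(idx, obj)) {
		/* Lost the race: free our copy and pick up the winner's. */
		free(obj);
		goto retry;
	}
	return obj;
}

int main(void)
{
	void *a = find_or_alloc(7);
	void *b = find_or_alloc(7);

	printf("same object: %s\n", a == b ? "yes" : "no");
	return 0;
}

Under these assumptions, both calls resolve to the same object: the second lookup hits
the entry the first call inserted, which is the property the retry loop preserves even
when two racing callers each allocate before one of them wins the insert.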