author	Stephen Wilson <wilsons@start.ca>	2011-03-13 15:49:15 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2011-03-23 16:36:54 -0400
commit	31db58b3ab432f72ea76be58b12e6ffaf627d5db (patch)
tree	c88b742e1f2c52045d5abc6d35d7492ebdf64541 /mm
parent	375906f8765e131a4a159b1ffebf78c15db7b3bf (diff)
download	op-kernel-dev-31db58b3ab432f72ea76be58b12e6ffaf627d5db.zip
		op-kernel-dev-31db58b3ab432f72ea76be58b12e6ffaf627d5db.tar.gz
mm: arch: make get_gate_vma take an mm_struct instead of a task_struct
Morally, the presence of a gate vma is more an attribute of a particular mm than
a particular task.  Moreover, dropping the dependency on task_struct will help
make both existing and future operations on mm's more flexible and convenient.

Signed-off-by: Stephen Wilson <wilsons@start.ca>
Reviewed-by: Michel Lespinasse <walken@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
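The interface change is easiest to see as a pair of prototypes. A minimal sketch, assuming the declaration lives in include/linux/mm.h (the header hunk is outside the mm/-limited diffstat below):

	/* Old interface: the task was only a detour to reach its mm. */
	struct vm_area_struct *get_gate_vma(struct task_struct *tsk);

	/* New interface: look the gate vma up from the mm directly. */
	struct vm_area_struct *get_gate_vma(struct mm_struct *mm);

Call sites change in the same spirit: get_gate_vma(current) becomes get_gate_vma(current->mm), as the hunks below show.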
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	4
-rw-r--r--	mm/mlock.c	4
2 files changed, 4 insertions, 4 deletions
diff --git a/mm/memory.c b/mm/memory.c
index e48945a..b6dc370 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1488,7 +1488,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		vma = find_extend_vma(mm, start);
 		if (!vma && in_gate_area(tsk, start)) {
 			unsigned long pg = start & PAGE_MASK;
-			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
+			struct vm_area_struct *gate_vma = get_gate_vma(tsk->mm);
 			pgd_t *pgd;
 			pud_t *pud;
 			pmd_t *pmd;
@@ -3496,7 +3496,7 @@ static int __init gate_vma_init(void)
 __initcall(gate_vma_init);
 #endif
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef AT_SYSINFO_EHDR
 	return &gate_vma;
diff --git a/mm/mlock.c b/mm/mlock.c
index c3924c7f..2689a08c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -237,7 +237,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
 
 	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 			is_vm_hugetlb_page(vma) ||
-			vma == get_gate_vma(current))) {
+			vma == get_gate_vma(current->mm))) {
 
 		__mlock_vma_pages_range(vma, start, end, NULL);
@@ -332,7 +332,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	int lock = newflags & VM_LOCKED;
 
 	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
-	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current))
+	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		goto out;	/* don't set VM_LOCKED, don't count */
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
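The flexibility the commit message mentions shows up in mm-only code paths: once the task_struct dependency is gone, a routine that holds only an mm_struct can test for the gate vma directly. A hypothetical helper, for illustration only and not part of this patch:

	/* Hypothetical: check whether a vma is the mm's gate vma without
	 * needing any task_struct in scope. */
	static bool vma_is_gate(struct mm_struct *mm, struct vm_area_struct *vma)
	{
		return vma == get_gate_vma(mm);
	}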