| author | Peter Zijlstra <peterz@infradead.org> | 2013-10-07 11:29:20 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-10-09 14:47:45 +0200 |
| commit | 90572890d202527c366aa9489b32404e88a7c020 (patch) | |
| tree | 0577f3b043e312f6d53e50105b236514f7df2455 /mm/mprotect.c | |
| parent | e1dda8a797b59d7ec4b17e393152ec3273a552d5 (diff) | |
mm: numa: Change page last {nid,pid} into {cpu,pid}
Change the per-page last fault tracking to use cpu,pid instead of
nid,pid. This will allow us to try to look up the alternate task more
easily. Note that even though it is the cpu that is stored in the page
flags, the mpol_misplaced decision is still based on the node.
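
The idea is that the last cpu and a truncated pid are packed together into a single integer kept in the page flags. As a rough illustration, the user-space sketch below models such a packing; the helper names and bit widths (8 pid bits with the cpu in the bits above) are assumptions for illustration only and do not reproduce the kernel's actual constants (NR_CPUS_BITS, LAST__PID_SHIFT, and friends).

```c
#include <assert.h>
#include <stdio.h>

/*
 * Hypothetical model of a cpupid encoding: the low bits hold a
 * truncated pid, the bits above hold the cpu. The widths here are
 * illustrative assumptions, not the kernel's real configuration-
 * dependent values.
 */
#define PID_BITS	8
#define CPU_BITS	10
#define PID_MASK	((1 << PID_BITS) - 1)
#define CPU_MASK	((1 << CPU_BITS) - 1)

static int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & CPU_MASK) << PID_BITS) | (pid & PID_MASK);
}

static int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> PID_BITS) & CPU_MASK;
}

static int cpupid_to_pid(int cpupid)
{
	return cpupid & PID_MASK;
}

int main(void)
{
	int cpupid = cpu_pid_to_cpupid(3, 4242);

	/* Only the low PID_BITS of the pid survive: 4242 & 255 == 146. */
	assert(cpupid_to_cpu(cpupid) == 3);
	assert(cpupid_to_pid(cpupid) == (4242 & PID_MASK));
	printf("cpu=%d pid=%d\n", cpupid_to_cpu(cpupid), cpupid_to_pid(cpupid));
	return 0;
}
```

Since the node can be derived from the cpu (cpu_to_node() in the kernel), storing the cpu loses nothing for the node-based mpol_misplaced check while additionally recording where the last faulting task ran.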
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-43-git-send-email-mgorman@suse.de
[ Fixed build failure on 32-bit systems. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/mprotect.c')

| -rw-r--r-- | mm/mprotect.c | 28 |

1 file changed, 14 insertions(+), 14 deletions(-)
```diff
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 5aae390..9a74855 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -37,14 +37,14 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa, bool *ret_all_same_nidpid)
+		int dirty_accountable, int prot_numa, bool *ret_all_same_cpupid)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 	unsigned long pages = 0;
-	bool all_same_nidpid = true;
-	int last_nid = -1;
+	bool all_same_cpupid = true;
+	int last_cpu = -1;
 	int last_pid = -1;
 
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
@@ -64,17 +64,17 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 			page = vm_normal_page(vma, addr, oldpte);
 			if (page) {
-				int nidpid = page_nidpid_last(page);
-				int this_nid = nidpid_to_nid(nidpid);
-				int this_pid = nidpid_to_pid(nidpid);
+				int cpupid = page_cpupid_last(page);
+				int this_cpu = cpupid_to_cpu(cpupid);
+				int this_pid = cpupid_to_pid(cpupid);
 
-				if (last_nid == -1)
-					last_nid = this_nid;
+				if (last_cpu == -1)
+					last_cpu = this_cpu;
 				if (last_pid == -1)
 					last_pid = this_pid;
-				if (last_nid != this_nid ||
+				if (last_cpu != this_cpu ||
 				    last_pid != this_pid) {
-					all_same_nidpid = false;
+					all_same_cpupid = false;
 				}
 
 				if (!pte_numa(oldpte)) {
@@ -115,7 +115,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
 
-	*ret_all_same_nidpid = all_same_nidpid;
+	*ret_all_same_cpupid = all_same_cpupid;
 	return pages;
 }
 
@@ -142,7 +142,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 	pmd_t *pmd;
 	unsigned long next;
 	unsigned long pages = 0;
-	bool all_same_nidpid;
+	bool all_same_cpupid;
 
 	pmd = pmd_offset(pud, addr);
 	do {
@@ -168,7 +168,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
-				 dirty_accountable, prot_numa, &all_same_nidpid);
+				 dirty_accountable, prot_numa, &all_same_cpupid);
 		pages += this_pages;
 
 		/*
@@ -177,7 +177,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		 * node. This allows a regular PMD to be handled as one fault
 		 * and effectively batches the taking of the PTL
 		 */
-		if (prot_numa && this_pages && all_same_nidpid)
+		if (prot_numa && this_pages && all_same_cpupid)
 			change_pmd_protnuma(vma->vm_mm, addr, pmd);
 	} while (pmd++, addr = next, addr != end);
```