From 1ff6bbfd13ca2c114a5cb58e1a92d1e5d68ce0b7 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 28 Jan 2014 18:10:37 -0500
Subject: arm, pm, vmpressure: add missing slab.h includes

arch/arm/mach-tegra/pm.c, kernel/power/console.c and mm/vmpressure.c
were somehow getting slab.h indirectly through cgroup.h, which in turn
was getting it indirectly through xattr.h.  A scheduled cgroup change
drops the xattr.h inclusion from cgroup.h and breaks compilation of
these three files.  Add explicit slab.h includes to the three files.

A pending cgroup patch depends on this change and it'd be great if
this can be routed through the cgroup/for-3.14-fixes branch.

Signed-off-by: Tejun Heo
Acked-by: Stephen Warren
Cc: Thierry Reding
Cc: linux-tegra@vger.kernel.org
Cc: "Rafael J. Wysocki"
Cc: linux-pm@vger.kernel.org
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Balbir Singh
Cc: KAMEZAWA Hiroyuki
Cc: cgroups@vger.kernel.org
---
 mm/vmpressure.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'mm')

diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 196970a..d4042e7 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include <linux/slab.h>
 #include
 #include
 #include
--
cgit v1.1

From 9d85d5863fa4818eb7fa306563bf830c0210c3a6 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Wed, 12 Feb 2014 09:13:37 +0530
Subject: mm: Dirty accountable change only apply to non prot numa case

So move it within the if block.

Acked-by: Mel Gorman
Reviewed-by: Rik van Riel
Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Benjamin Herrenschmidt
---
 mm/mprotect.c | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

(limited to 'mm')

diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7332c17..33eab90 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -58,6 +58,13 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			if (pte_numa(ptent))
 				ptent = pte_mknonnuma(ptent);
 			ptent = pte_modify(ptent, newprot);
+			/*
+			 * Avoid taking write faults for pages we
+			 * know to be dirty.
+			 */
+			if (dirty_accountable && pte_dirty(ptent))
+				ptent = pte_mkwrite(ptent);
+			ptep_modify_prot_commit(mm, addr, pte, ptent);
 			updated = true;
 		} else {
 			struct page *page;
@@ -72,22 +79,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				}
 			}
 		}
-
-		/*
-		 * Avoid taking write faults for pages we know to be
-		 * dirty.
-		 */
-		if (dirty_accountable && pte_dirty(ptent)) {
-			ptent = pte_mkwrite(ptent);
-			updated = true;
-		}
-
 		if (updated)
 			pages++;
-
-		/* Only !prot_numa always clears the pte */
-		if (!prot_numa)
-			ptep_modify_prot_commit(mm, addr, pte, ptent);
 	} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
 		swp_entry_t entry = pte_to_swp_entry(oldpte);

--
cgit v1.1

From 56eecdb912b536a4fa97fb5bfe5a940a54d79be6 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Wed, 12 Feb 2014 09:13:38 +0530
Subject: mm: Use ptep/pmdp_set_numa() for updating _PAGE_NUMA bit

Archs like ppc64 don't do a TLB flush in the set_pte/pmd functions when
using a hash table MMU, for various reasons (the flush is handled as
part of the PTE modification when necessary).  ppc64 thus doesn't
implement flush_tlb_range for hash-based MMUs.  Additionally, ppc64
requires the TLB flushing to be batched within ptl locks.  The reason
for that is to ensure that the hash page table stays in sync with the
Linux page table.
We track the hpte index in the Linux PTE, and if we clear it without
flushing the hash table and then drop the ptl lock, another CPU can
update the PTE and we can end up with a duplicate entry in the hash
table, which is fatal.

We also want to keep set_pte_at() simple by not requiring it to do a
hash flush, for performance reasons.  We do that by assuming that
set_pte_at() is never *ever* called on a PTE that is already valid.
This was the case until the NUMA code went in and broke that
assumption.

Fix that by introducing a new pair of helpers to set _PAGE_NUMA in a
way similar to ptep/pmdp_set_wrprotect(): a generic implementation
using set_pte_at() and a powerpc-specific one using the appropriate
mechanism needed to keep the hash table in sync.

Acked-by: Mel Gorman
Reviewed-by: Rik van Riel
Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Benjamin Herrenschmidt
---
 mm/huge_memory.c | 9 ++-------
 mm/mprotect.c    | 4 +---
 2 files changed, 3 insertions(+), 10 deletions(-)

(limited to 'mm')

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 82166bf..da23eb9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1545,6 +1545,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			entry = pmd_mknonnuma(entry);
 			entry = pmd_modify(entry, newprot);
 			ret = HPAGE_PMD_NR;
+			set_pmd_at(mm, addr, pmd, entry);
 			BUG_ON(pmd_write(entry));
 		} else {
 			struct page *page = pmd_page(*pmd);
@@ -1557,16 +1558,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 */
 			if (!is_huge_zero_page(page) &&
 			    !pmd_numa(*pmd)) {
-				entry = *pmd;
-				entry = pmd_mknuma(entry);
+				pmdp_set_numa(mm, addr, pmd);
 				ret = HPAGE_PMD_NR;
 			}
 		}
-
-		/* Set PMD if cleared earlier */
-		if (ret == HPAGE_PMD_NR)
-			set_pmd_at(mm, addr, pmd, entry);
-
 		spin_unlock(ptl);
 	}

diff --git a/mm/mprotect.c b/mm/mprotect.c
index 33eab90..769a67a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -69,12 +69,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		} else {
 			struct page *page;

-			ptent = *pte;
 			page = vm_normal_page(vma, addr, oldpte);
 			if (page && !PageKsm(page)) {
 				if (!pte_numa(oldpte)) {
-					ptent = pte_mknuma(ptent);
-					set_pte_at(mm, addr, pte, ptent);
+					ptep_set_numa(mm, addr, pte);
 					updated = true;
 				}
 			}
--
cgit v1.1
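
For reference, the commit message above describes the generic implementation
of the new helpers as a thin wrapper around set_pte_at()/set_pmd_at(), with
powerpc overriding them to keep its hash page table in sync.  A minimal
sketch of what that generic fallback might look like follows; the helper
names and call signatures come from the diff, but the bodies and their
placement (e.g. in asm-generic/pgtable.h) are assumptions, not the literal
patch:

static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	pte_t ptent = *ptep;

	/* Mark the PTE _PAGE_NUMA through the ordinary set_pte_at() path. */
	ptent = pte_mknuma(ptent);
	set_pte_at(mm, addr, ptep, ptent);
}

static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
				 pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	/* Same idea for transparent huge page PMDs. */
	pmd = pmd_mknuma(pmd);
	set_pmd_at(mm, addr, pmdp, pmd);
}

An arch like ppc64 would presumably replace these with variants that update
the PTE/PMD using its batched hash-flush machinery under the ptl, so the
"set_pte_at() is never called on a valid PTE" assumption continues to hold.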