author		Peter Zijlstra <a.p.zijlstra@chello.nl>		2011-05-24 17:11:50 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 08:39:13 -0700
commit		90f08e399d054d017c0e2c5089a0f44a76418271 (patch)
tree		771d8a9ff675e3fe5deda53d0701e41f1f15e9a1 /arch/sparc/mm
parent		d6bf29b44ddf3ca915f77b9383bee8b7a209f3fd (diff)
sparc: mmu_gather rework
Rework the sparc mmu_gather usage to conform to the new world order :-)
Sparc mmu_gather does two things:
- tracks vaddrs to unhash
- tracks pages to free
Split these two things, as powerpc has done: keep the vaddrs in a
per-cpu data structure and flush them on context switch.
The remaining bits can then use the generic mmu_gather.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
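
The diffstat below is limited to 'arch/sparc/mm', so the new batch type
itself is out of view; the patch introduces it outside this directory, in
arch/sparc/include/asm/tlbflush_64.h. A minimal sketch of the structure the
code below manipulates, with the fields inferred from their uses in tlb.c:

	/* Sketch of the per-cpu batch that replaces sparc's private
	 * mmu_gather; see arch/sparc/include/asm/tlbflush_64.h for the
	 * real definition.  TLB_BATCH_NR bounds the batch and triggers
	 * the drain in tlb_batch_add() below. */
	struct tlb_batch {
		struct mm_struct *mm;			/* mm owning the queued vaddrs */
		unsigned long tlb_nr;			/* number of vaddrs queued */
		unsigned long vaddrs[TLB_BATCH_NR];	/* vaddrs to unhash and flush */
	};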
Diffstat (limited to 'arch/sparc/mm')

-rw-r--r--	arch/sparc/mm/tlb.c	43
-rw-r--r--	arch/sparc/mm/tsb.c	15

2 files changed, 32 insertions(+), 26 deletions(-)
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index d8f21e2..b1f279c 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -19,33 +19,34 @@
 /* Heavily inspired by the ppc64 code. */
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 
-	if (mp->tlb_nr) {
-		flush_tsb_user(mp);
+	if (tb->tlb_nr) {
+		flush_tsb_user(tb);
 
-		if (CTX_VALID(mp->mm->context)) {
+		if (CTX_VALID(tb->mm->context)) {
 #ifdef CONFIG_SMP
-			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
-					      &mp->vaddrs[0]);
+			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+					      &tb->vaddrs[0]);
 #else
-			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
-					    mp->tlb_nr, &mp->vaddrs[0]);
+			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
+					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
 
-		mp->tlb_nr = 0;
+		tb->tlb_nr = 0;
 	}
 
-	put_cpu_var(mmu_gathers);
+	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
 {
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
@@ -77,21 +78,25 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 
 no_cache_flush:
 
-	if (mp->fullmm)
+	if (fullmm) {
+		put_cpu_var(tlb_batch);
 		return;
+	}
 
-	nr = mp->tlb_nr;
+	nr = tb->tlb_nr;
 
-	if (unlikely(nr != 0 && mm != mp->mm)) {
+	if (unlikely(nr != 0 && mm != tb->mm)) {
 		flush_tlb_pending();
 		nr = 0;
 	}
 
 	if (nr == 0)
-		mp->mm = mm;
+		tb->mm = mm;
 
-	mp->vaddrs[nr] = vaddr;
-	mp->tlb_nr = ++nr;
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
 	if (nr >= TLB_BATCH_NR)
 		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
 }
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 101d7c8..9484615 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -47,12 +47,13 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
 {
 	unsigned long i;
 
-	for (i = 0; i < mp->tlb_nr; i++) {
-		unsigned long v = mp->vaddrs[i];
+	for (i = 0; i < tb->tlb_nr; i++) {
+		unsigned long v = tb->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
@@ -65,9 +66,9 @@ static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, uns
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+void flush_tsb_user(struct tlb_batch *tb)
 {
-	struct mm_struct *mm = mp->mm;
+	struct mm_struct *mm = tb->mm;
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
@@ -76,7 +77,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 		base = __pa(base);
-	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -84,7 +85,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
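
The "flush them on context switch" half of the change also lives outside
arch/sparc/mm, so it does not show up above. As a rough sketch (assuming
sparc64's switch_to hook in arch/sparc/include/asm/system_64.h, which this
patch's diffstat does not cover), the pending batch is drained before
another thread runs, so vaddrs from different mms are never mixed in one
cpu's batch:

	/* Sketch only: the real switch_to macro also saves FPU and
	 * register-window state and performs the actual thread switch. */
	#define switch_to(prev, next, last)				\
	do {								\
		flush_tlb_pending();	/* drain this cpu's tlb_batch */\
		/* ... save state, switch threads ... */		\
	} while (0)

Within the diff itself, note that tlb_batch_add() now brackets every exit
path with get_cpu_var()/put_cpu_var(), keeping preemption disabled for as
long as the per-cpu batch is being manipulated; that is why the fullmm
early return grew a put_cpu_var() and why a final put_cpu_var() was added
at the end of the function.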