author:    Martin Schwidefsky <schwidefsky@de.ibm.com>  2013-08-16 13:31:40 +0200
committer: Martin Schwidefsky <schwidefsky@de.ibm.com>  2013-08-22 12:20:09 +0200
commit:    5c474a1e2265c5156e6c63f87a7e99053039b8b9
tree:      a50c365553dbaf2c3d97c8a0dbca8c94cec34e9f /arch/s390
parent:    b6bed093f489ef0a858e63eebcf5f2fb71ed3222
s390/mm: introduce ptep_flush_lazy helper
Isolate the logic of IDTE vs. IPTE flushing of ptes in two functions,
ptep_flush_lazy and __tlb_flush_mm_lazy.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
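
To make the new flushing rule concrete before the diff, here is a minimal user-space sketch of the decision logic this patch introduces. It is an approximation only: struct mm_model, the *_model helper names, and the printf markers are stand-ins invented for illustration; the real helpers operate on struct mm_struct and issue the IPTE/IDTE instructions (see the pgtable.h and tlbflush.h hunks below).

#include <stdio.h>
#include <stdbool.h>

struct mm_model {
	int attach_count;	/* CPUs that currently have this mm attached */
	bool active_here;	/* is this mm current->active_mm on this CPU? */
	bool flush_mm;		/* deferred-flush flag */
};

/* Models ptep_flush_lazy(): flush now only if another CPU may see the pte. */
static void ptep_flush_lazy_model(struct mm_model *mm)
{
	int active = mm->active_here ? 1 : 0;

	if (mm->attach_count > active) {
		printf("IPTE: invalidate this pte immediately\n");
	} else {
		mm->flush_mm = true;
		printf("lazy: mark mm for a deferred full flush\n");
	}
}

/* Models __tlb_flush_mm_lazy(): full flush only if one was deferred. */
static void tlb_flush_mm_lazy_model(struct mm_model *mm)
{
	if (mm->flush_mm) {
		printf("IDTE/full: flush all TLB entries for this mm\n");
		mm->flush_mm = false;
	}
}

int main(void)
{
	struct mm_model shared    = { .attach_count = 2, .active_here = true };
	struct mm_model exclusive = { .attach_count = 1, .active_here = true };

	ptep_flush_lazy_model(&shared);		/* other CPU attached -> flush now */
	ptep_flush_lazy_model(&exclusive);	/* exclusive -> defer */
	tlb_flush_mm_lazy_model(&exclusive);	/* deferred flush fires here */
	return 0;
}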
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/mmu_context.h |  3
-rw-r--r--  arch/s390/include/asm/pgtable.h     | 31
-rw-r--r--  arch/s390/include/asm/tlb.h         |  3
-rw-r--r--  arch/s390/include/asm/tlbflush.h    |  6
-rw-r--r--  arch/s390/mm/pgtable.c              |  6
5 files changed, 24 insertions, 25 deletions
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 084e775..7b7fce4 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -77,8 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
 	atomic_inc(&next->context.attach_count);
 	/* Check for TLBs not flushed yet */
-	if (next->context.flush_mm)
-		__tlb_flush_mm(next);
+	__tlb_flush_mm_lazy(next);
 }
 
 #define enter_lazy_tlb(mm,tsk)	do { } while (0)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 26ee897..125e379 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -414,12 +414,6 @@ extern unsigned long MODULES_END;
 #define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT)
 #define SEGMENT_WRITE	__pgprot(0)
 
-static inline int mm_exclusive(struct mm_struct *mm)
-{
-	return likely(mm == current->active_mm &&
-		      atomic_read(&mm->context.attach_count) <= 1);
-}
-
 static inline int mm_has_pgste(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
@@ -1037,6 +1031,17 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 	}
 }
 
+static inline void ptep_flush_lazy(struct mm_struct *mm,
+				   unsigned long address, pte_t *ptep)
+{
+	int active = (mm == current->active_mm) ? 1 : 0;
+
+	if (atomic_read(&mm->context.attach_count) > active)
+		__ptep_ipte(address, ptep);
+	else
+		mm->context.flush_mm = 1;
+}
+
 /*
  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
  * both clear the TLB for the unmapped pte. The reason is that
@@ -1057,15 +1062,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	pgste_t pgste;
 	pte_t pte;
 
-	mm->context.flush_mm = 1;
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
-	if (!mm_exclusive(mm))
-		__ptep_ipte(address, ptep);
+	ptep_flush_lazy(mm, address, ptep);
 	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(mm)) {
@@ -1083,15 +1086,13 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 	pgste_t pgste;
 	pte_t pte;
 
-	mm->context.flush_mm = 1;
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
-	if (!mm_exclusive(mm))
-		__ptep_ipte(address, ptep);
+	ptep_flush_lazy(mm, address, ptep);
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1160,7 +1161,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 
 	pte = *ptep;
 	if (!full)
-		__ptep_ipte(address, ptep);
+		ptep_flush_lazy(mm, address, ptep);
 	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (!full && mm_has_pgste(mm)) {
@@ -1178,14 +1179,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
 	pte_t pte = *ptep;
 
 	if (pte_write(pte)) {
-		mm->context.flush_mm = 1;
 		if (mm_has_pgste(mm)) {
 			pgste = pgste_get_lock(ptep);
 			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 		}
 
-		if (!mm_exclusive(mm))
-			__ptep_ipte(address, ptep);
+		ptep_flush_lazy(mm, address, ptep);
 		pte = pte_wrprotect(pte);
 
 		if (mm_has_pgste(mm)) {
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 6d6d92b..2cb846c 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -63,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
+	__tlb_flush_mm_lazy(tlb->mm);
 	tlb_table_flush(tlb);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
-	tlb_table_flush(tlb);
+	tlb_flush_mmu(tlb);
 }
 
 /*
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 6b32af3..f9fef04 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 		__tlb_flush_full(mm);
 }
 
-static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
+static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
 	if (mm->context.flush_mm) {
 		__tlb_flush_mm(mm);
@@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	__tlb_flush_mm_cond(mm);
+	__tlb_flush_mm_lazy(mm);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	__tlb_flush_mm_cond(vma->vm_mm);
+	__tlb_flush_mm_lazy(vma->vm_mm);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b9d35d6..befaea7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1008,7 +1008,6 @@ void tlb_table_flush(struct mmu_gather *tlb)
 	struct mmu_table_batch **batch = &tlb->batch;
 
 	if (*batch) {
-		__tlb_flush_mm(tlb->mm);
 		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
 		*batch = NULL;
 	}
@@ -1018,11 +1017,12 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
 
+	tlb->mm->context.flush_mm = 1;
 	if (*batch == NULL) {
 		*batch = (struct mmu_table_batch *)
 			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 		if (*batch == NULL) {
-			__tlb_flush_mm(tlb->mm);
+			__tlb_flush_mm_lazy(tlb->mm);
 			tlb_remove_table_one(table);
 			return;
 		}
@@ -1030,7 +1030,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 	}
 	(*batch)->tables[(*batch)->nr++] = table;
 	if ((*batch)->nr == MAX_TABLE_BATCH)
-		tlb_table_flush(tlb);
+		tlb_flush_mmu(tlb);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
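
Reading the hunks together: attach_count counts the CPUs that currently have the mm attached, so atomic_read(&mm->context.attach_count) > active means another CPU may still hold TLB entries for the pte and an immediate IPTE is required. Otherwise ptep_flush_lazy() merely sets mm->context.flush_mm, and the deferred work is picked up by __tlb_flush_mm_lazy() at the next switch_mm() or tlb_flush_mmu(); tlb_remove_table() now sets the flag up front, so the full flush is guaranteed to happen before batched page-table pages are freed via RCU.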