From 0c8c77d35582c3f7989f1316368da5ae7f14ad4b Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Wed, 23 Apr 2014 20:58:45 +0200
Subject: s390/ccwgroup: Fix memory corruption

commit 0b60f9ead5d4816e7e3d6e28f4a0d22d4a1b2513 (s390: use
device_remove_file_self() instead of device_schedule_callback()) caused
random memory corruption on my s390 box. Turns out that the last
element of the ccwgroup structure is of dynamic size, so we must move
the newly introduced work structure _before_ the zero length array.

Signed-off-by: Christian Borntraeger
CC: Tejun Heo
CC: Martin Schwidefsky
CC: Heiko Carstens
CC: Sebastian Ott
CC: Peter Oberparleiter
Acked-by: Tejun Heo
Signed-off-by: Greg Kroah-Hartman
---
 arch/s390/include/asm/ccwgroup.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/s390/include')

diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 6e670f8..ebc2913 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -22,8 +22,8 @@ struct ccwgroup_device {
 /* public: */
 	unsigned int count;
 	struct device dev;
-	struct ccw_device *cdev[0];
 	struct work_struct ungroup_work;
+	struct ccw_device *cdev[0];
 };
 
 /**
--
cgit v1.1

From 1cf35d47712dd5dc4d62c6ce984f04ac6eab0408 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Fri, 25 Apr 2014 16:05:40 -0700
Subject: mm: split 'tlb_flush_mmu()' into tlb flushing and memory freeing parts

The mmu-gather operation 'tlb_flush_mmu()' has done two things: the
actual tlb flush operation, and the batched freeing of the pages that
the TLB entries pointed at.

This splits the operation into separate phases, so that the forced
batched flushing done by zap_pte_range() can now do the actual TLB
flush while still holding the page table lock, but delay the batched
freeing of all the pages to after the lock has been dropped.

This in turn allows us to avoid a race condition between
set_page_dirty() (as called by zap_pte_range() when it finds a dirty
shared memory pte) and page_mkclean(): because we now flush all the
dirty page data from the TLB's while holding the pte lock,
page_mkclean() will be held up walking the (recently cleaned) page
tables until after the TLB entries have been flushed from all CPU's.

Reported-by: Benjamin Herrenschmidt
Tested-by: Dave Hansen
Acked-by: Hugh Dickins
Cc: Peter Zijlstra
Cc: Russell King - ARM Linux
Cc: Tony Luck
Signed-off-by: Linus Torvalds
---
 arch/s390/include/asm/tlb.h | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

(limited to 'arch/s390/include')

diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c544b6f..a25f09f 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->batch = NULL;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	__tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	tlb_table_flush(tlb);
 }
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
--
cgit v1.1
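
A note on the first patch above: cdev[] is a zero-length array, so the
storage backing it is whatever extra space was allocated past the end of
the struct, and any member declared after it aliases that same region.
Below is a minimal userspace sketch of the failure mode; it is
illustrative only (hypothetical struct and field names, relying on the
GNU zero-length-array extension), not the kernel code itself.

	#include <stdio.h>
	#include <stdlib.h>

	/* Broken layout, mirroring the bug: a real member placed after a
	 * zero-length array. GCC accepts this, but 'slots[0]' and 'work'
	 * end up occupying the same bytes. */
	struct bad_group {
		unsigned int count;
		char *slots[0];		/* dynamic tail: must be the last member */
		int work;		/* BUG: lives inside the tail region */
	};

	/* Fixed layout, as in the patch: fixed-size members first. */
	struct good_group {
		unsigned int count;
		int work;
		char *slots[0];
	};

	int main(void)
	{
		/* Allocate the struct plus room for two trailing pointers,
		 * the way the kernel sizes ccwgroup_device for cdev[]. */
		struct bad_group *g = malloc(sizeof(*g) + 2 * sizeof(char *));

		g->work = 42;
		g->slots[0] = "first";	/* silently clobbers g->work */

		printf("work = %d\n", g->work);	/* garbage, not 42 */
		free(g);
		return 0;
	}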
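
And a note on the second patch: the split only pays off because of the
call sequence it enables in zap_pte_range(). The sketch below paraphrases
that sequence and is simplified, not the literal mm/memory.c code: the
TLB flush now happens while the pte lock is still held, which is what
blocks the page_mkclean() race, and the batched freeing runs after the
lock has been dropped.

	/* Paraphrased from the zap_pte_range() pattern this commit
	 * enables; simplified, not the upstream function. */
	static void zap_range_sketch(struct mmu_gather *tlb, pte_t *start_pte,
				     spinlock_t *ptl, bool force_flush)
	{
		/* ... ptes are torn down here; dirty shared ptes call
		 * set_page_dirty(), so their TLB entries must be gone
		 * before page_mkclean() can walk these page tables ... */

		if (force_flush)
			tlb_flush_mmu_tlbonly(tlb);	/* flush under the pte lock */

		pte_unmap_unlock(start_pte, ptl);	/* drop the page table lock */

		if (force_flush)
			tlb_flush_mmu_free(tlb);	/* free pages without the lock */
	}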