From 620905405b6e6531c86e00a73a15f429270d79fd Mon Sep 17 00:00:00 2001
From: gonzo
Date: Mon, 31 Dec 2012 21:00:38 +0000
Subject: Merge r234561 from busdma_machdep.c to ARMv6 version of busdma:

Interrupts must be disabled while handling a partial cache line flush,
as otherwise the interrupt handling code may modify data in the non-DMA
part of the cache line while we have it stashed away in the temporary
stack buffer, then we end up restoring a stale value.

PR:		160431
Submitted by:	Ian Lepore
---
 sys/arm/arm/busdma_machdep-v6.c | 52 ++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 19 deletions(-)

diff --git a/sys/arm/arm/busdma_machdep-v6.c b/sys/arm/arm/busdma_machdep-v6.c
index 1407fa1..2a7c23b 100644
--- a/sys/arm/arm/busdma_machdep-v6.c
+++ b/sys/arm/arm/busdma_machdep-v6.c
@@ -1347,35 +1347,49 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 	while (sl != NULL) {
 			/* write back the unaligned portions */
 		vm_paddr_t physaddr;
+		register_t s = 0;
+
 		buf = sl->vaddr;
 		len = sl->datacount;
 		physaddr = sl->busaddr;
 		bbuf = buf & ~arm_dcache_align_mask;
 		ebuf = buf + len;
 		physaddr = physaddr & ~arm_dcache_align_mask;
-		unalign = buf & arm_dcache_align_mask;
-		if (unalign) {
-			memcpy(_tmp_cl, (void *)bbuf, unalign);
-			len += unalign; /* inv entire cache line */
-		}
-		unalign = ebuf & arm_dcache_align_mask;
-		if (unalign) {
-			unalign = arm_dcache_align - unalign;
-			memcpy(_tmp_clend, (void *)ebuf, unalign);
-			len += unalign; /* inv entire cache line */
+
+
+		if ((buf & arm_dcache_align_mask) ||
+		    (ebuf & arm_dcache_align_mask)) {
+			s = intr_disable();
+			unalign = buf & arm_dcache_align_mask;
+			if (unalign) {
+				memcpy(_tmp_cl, (void *)bbuf, unalign);
+				len += unalign; /* inv entire cache line */
+			}
+
+			unalign = ebuf & arm_dcache_align_mask;
+			if (unalign) {
+				unalign = arm_dcache_align - unalign;
+				memcpy(_tmp_clend, (void *)ebuf, unalign);
+				len += unalign; /* inv entire cache line */
+			}
 		}
-			/* inv are cache length aligned */
+
+		/* inv are cache length aligned */
 		cpu_dcache_inv_range(bbuf, len);
 		l2cache_inv_range(bbuf, physaddr, len);
 
-		unalign = (vm_offset_t)buf & arm_dcache_align_mask;
-		if (unalign) {
-			memcpy((void *)bbuf, _tmp_cl, unalign);
-		}
-		unalign = ebuf & arm_dcache_align_mask;
-		if (unalign) {
-			unalign = arm_dcache_align - unalign;
-			memcpy((void *)ebuf, _tmp_clend, unalign);
+		if ((buf & arm_dcache_align_mask) ||
+		    (ebuf & arm_dcache_align_mask)) {
+			unalign = (vm_offset_t)buf & arm_dcache_align_mask;
+			if (unalign)
+				memcpy((void *)bbuf, _tmp_cl, unalign);
+
+			unalign = ebuf & arm_dcache_align_mask;
+			if (unalign)
+				memcpy((void *)ebuf, _tmp_clend,
+				    arm_dcache_align - unalign);
+
+			intr_restore(s);
 		}
 		sl = STAILQ_NEXT(sl, slinks);
 	}
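
For readers unfamiliar with the partial-cache-line handling the patch fixes, the standalone C sketch below mirrors what the patched loop does for the invalidate (device-to-CPU) direction. It is illustrative only and not part of the commit: the CACHE_LINE constant, the function name, and the save_interrupts()/restore_interrupts()/dcache_inv_range() helpers are placeholders standing in for the kernel's arm_dcache_align, intr_disable()/intr_restore(), and cpu_dcache_inv_range()/l2cache_inv_range().

/*
 * Illustrative sketch only; not FreeBSD code.  The helpers below are
 * stubs assumed for the example, not real kernel APIs.
 */
#include <stdint.h>
#include <string.h>

#define CACHE_LINE	64u
#define CACHE_MASK	(CACHE_LINE - 1)

static unsigned long save_interrupts(void) { return 0; }		/* placeholder */
static void restore_interrupts(unsigned long s) { (void)s; }		/* placeholder */
static void dcache_inv_range(uintptr_t va, size_t len) { (void)va; (void)len; }

/*
 * Invalidate the cache lines covering [buf, buf + len) after a device
 * wrote the buffer by DMA.  If either end of the buffer shares a cache
 * line with non-DMA data, stash those outside bytes, invalidate whole
 * lines, and copy the stashed bytes back, all with interrupts disabled
 * so nothing can modify them while they exist only in the stash buffers.
 */
void
dma_postread_invalidate(uintptr_t buf, size_t len)
{
	static uint8_t head_save[CACHE_LINE], tail_save[CACHE_LINE];
	uintptr_t start = buf & ~(uintptr_t)CACHE_MASK;	/* line-aligned start */
	uintptr_t end = buf + len;			/* first byte past buffer */
	size_t head = buf & CACHE_MASK;			/* non-DMA bytes before buf */
	size_t tail = (end & CACHE_MASK) ?
	    CACHE_LINE - (end & CACHE_MASK) : 0;	/* non-DMA bytes after end */
	size_t inv_len = head + len + tail;		/* whole-line length */
	unsigned long s = 0;

	if (head != 0 || tail != 0) {
		s = save_interrupts();
		if (head != 0)
			memcpy(head_save, (void *)start, head);
		if (tail != 0)
			memcpy(tail_save, (void *)end, tail);
	}

	dcache_inv_range(start, inv_len);

	if (head != 0 || tail != 0) {
		if (head != 0)
			memcpy((void *)start, head_save, head);
		if (tail != 0)
			memcpy((void *)end, tail_save, tail);
		restore_interrupts(s);
	}
}

Between the stash and the restore, the bytes that share the first and last cache line with the DMA buffer exist only in head_save/tail_save; if an interrupt handler modified them in that window with interrupts enabled, the final memcpy would overwrite its update with stale data, which is exactly the problem described in the commit message.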