From f00a75c094c340c4e7435665816c3273c870e849 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 5 Oct 2009 15:17:45 +0100 Subject: ARM: Pass VMA to copy_user_highpage() implementations Our copy_user_highpage() implementations may require cache maintainence. Ensure that implementations have all necessary details to perform this maintainence. Signed-off-by: Russell King --- arch/arm/mm/copypage-feroceon.c | 2 +- arch/arm/mm/copypage-v3.c | 2 +- arch/arm/mm/copypage-v4mc.c | 2 +- arch/arm/mm/copypage-v4wb.c | 2 +- arch/arm/mm/copypage-v4wt.c | 2 +- arch/arm/mm/copypage-v6.c | 4 ++-- arch/arm/mm/copypage-xsc3.c | 2 +- arch/arm/mm/copypage-xscale.c | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index 70997d5..e2e8d29 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -68,7 +68,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom) } void feroceon_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index de9c068..f72303e 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c @@ -38,7 +38,7 @@ v3_copy_user_page(void *kto, const void *kfrom) } void v3_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 7370a71..598c51a 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -69,7 +69,7 @@ mc_copy_user_page(void *from, void *to) } void v4_mc_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto = kmap_atomic(to, KM_USER1); diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 9ab0984..e9920f6 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -48,7 +48,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom) } void v4wb_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index 300efaf..172e6a5 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -44,7 +44,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom) } void v4wt_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 4127a7b..334d560 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -34,7 +34,7 @@ static DEFINE_SPINLOCK(v6_lock); * attack the kernel's existing mapping of these pages. */ static void v6_copy_user_highpage_nonaliasing(struct page *to, - struct page *from, unsigned long vaddr) + struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; @@ -73,7 +73,7 @@ static void discard_old_kernel_data(void *kto) * Copy the page, taking account of the cache colour. 
*/ static void v6_copy_user_highpage_aliasing(struct page *to, - struct page *from, unsigned long vaddr) + struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { unsigned int offset = CACHE_COLOUR(vaddr); unsigned long kfrom, kto; diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index bc4525f..18ae05d 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -71,7 +71,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom) } void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 76824d3..9920c0a 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -91,7 +91,7 @@ mc_copy_user_page(void *from, void *to) } void xscale_mc_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto = kmap_atomic(to, KM_USER1); -- cgit v1.1 From 2725898fc9bb2121ac0fb1b5e4faf4fc09014729 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 5 Oct 2009 15:34:22 +0100 Subject: ARM: Flush user mapping on VIVT processors when copying a page Steven Walter writes: > I've been tracking down an instance of userspace data corruption, > and I believe I have found a window during fork where data can be > lost. The corruption is occurring on an ARMv5 system with VIVT > caches. Here's the scenario in question. Thread A is forking, > Thread B is running in userspace: > > Thread A: flush_cache_mm() (dup_mmap) > Thread B: writes to a page in the above mm > Thread A: pte_wrprotect() the above page (copy_one_pte) > Thread B: writes to the same page again > > During thread B's second write, he'll take a fault and enter the > do_wp_page() case. We'll end up calling copy_page(), which notably > uses the kernel virtual addresses for the old and new pages. This > means that the new page does not necessarily have the data from the > first write. Now there are two conflicting copies of the same > cache-line in dcache. If the userspace cache-line flushes before > the kernel cache-line, we lose the changes made during the first > write. do_wp_page does call flush_dcache_page on the newly-copied > page, but there's still a window where the CPU could flush the > userspace cache-line before then. Resolve this by flushing the user mapping before copying the page on processors with a writeback VIVT cache. Note: this does have a performance impact, and so needs further consideration before being merged - can we optimize out some of the cache flushes if, eg, we know that the page isn't yet mapped? 
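In outline, the fixed copy path looks like this (a sketch mirroring the feroceon hunk below; the v4wb and xsc3 variants gain the identical flush_cache_page() call):

	void feroceon_copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
	{
		void *kto, *kfrom;

		kto = kmap_atomic(to, KM_USER0);
		kfrom = kmap_atomic(from, KM_USER1);
		/* write back the user (VIVT) alias of the source page so that
		 * the kernel-address copy below sees thread B's first write */
		flush_cache_page(vma, vaddr, page_to_pfn(from));
		feroceon_copy_user_page(kto, kfrom);
		kunmap_atomic(kfrom, KM_USER1);
		kunmap_atomic(kto, KM_USER0);
	}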
Thread: Signed-off-by: Russell King --- arch/arm/mm/copypage-feroceon.c | 1 + arch/arm/mm/copypage-v4wb.c | 1 + arch/arm/mm/copypage-xsc3.c | 1 + 3 files changed, 3 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index e2e8d29..5eb4fd9 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -74,6 +74,7 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from, kto = kmap_atomic(to, KM_USER0); kfrom = kmap_atomic(from, KM_USER1); + flush_cache_page(vma, vaddr, page_to_pfn(from)); feroceon_copy_user_page(kto, kfrom); kunmap_atomic(kfrom, KM_USER1); kunmap_atomic(kto, KM_USER0); diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index e9920f6..7c2eb55 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -54,6 +54,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, kto = kmap_atomic(to, KM_USER0); kfrom = kmap_atomic(from, KM_USER1); + flush_cache_page(vma, vaddr, page_to_pfn(from)); v4wb_copy_user_page(kto, kfrom); kunmap_atomic(kfrom, KM_USER1); kunmap_atomic(kto, KM_USER0); diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index 18ae05d..747ad41 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -77,6 +77,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, kto = kmap_atomic(to, KM_USER0); kfrom = kmap_atomic(from, KM_USER1); + flush_cache_page(vma, vaddr, page_to_pfn(from)); xsc3_mc_copy_user_page(kto, kfrom); kunmap_atomic(kfrom, KM_USER1); kunmap_atomic(kto, KM_USER0); -- cgit v1.1 From 2ef7f3dbd7a70a48c3f09b498df528cb00ea03a4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 5 Nov 2009 13:29:36 +0000 Subject: ARM: Fix ptrace accesses Signed-off-by: Russell King --- arch/arm/mm/flush.c | 51 +++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 43 insertions(+), 8 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 6f3a4b7..e34f095 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -87,13 +88,26 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) __flush_icache_all(); } +#else +#define flush_pfn_alias(pfn,vaddr) do { } while (0) +#endif +#ifdef CONFIG_SMP +static void flush_ptrace_access_other(void *args) +{ + __flush_icache_all(); +} +#endif + +static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write) + unsigned long uaddr, void *kaddr, unsigned long len) { if (cache_is_vivt()) { - vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write); + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + unsigned long addr = (unsigned long)kaddr; + __cpuc_coherent_kern_range(addr, addr + len); + } return; } @@ -104,16 +118,37 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, } /* VIPT non-aliasing cache */ - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) && - vma->vm_flags & VM_EXEC) { + if (vma->vm_flags & VM_EXEC) { unsigned long addr = (unsigned long)kaddr; - /* only flushing the kernel mapping on non-aliasing VIPT */ __cpuc_coherent_kern_range(addr, addr + len); +#ifdef CONFIG_SMP + if (cache_ops_need_broadcast()) + smp_call_function(flush_ptrace_access_other, + NULL, 1); 
+#endif } } -#else -#define flush_pfn_alias(pfn,vaddr) do { } while (0) + +/* + * Copy user data from/to a page which is mapped into a different + * processes address space. Really, we want to allow our "user + * space" model to handle this. + * + * Note that this code needs to run on the current CPU. + */ +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *dst, const void *src, + unsigned long len) +{ +#ifdef CONFIG_SMP + preempt_disable(); #endif + memcpy(dst, src, len); + flush_ptrace_access(vma, page, uaddr, dst, len); +#ifdef CONFIG_SMP + preempt_enable(); +#endif +} void __flush_dcache_page(struct address_space *mapping, struct page *page) { -- cgit v1.1 From c26c20b823d48addbde9cb5709d80655c6fadf18 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:21:35 +0000 Subject: ARM: make_coherent: split adjust_pte() in two adjust_pte() walks the page tables, and do_adjust_pte() does the page table manipulation. Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 52 +++++++++++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 20 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 56ee153..074e6bb5 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -36,28 +36,12 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; * Therefore those configurations which might call adjust_pte (those * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. */ -static int adjust_pte(struct vm_area_struct *vma, unsigned long address) +static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep) { - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte, entry; + pte_t entry = *ptep; int ret; - pgd = pgd_offset(vma->vm_mm, address); - if (pgd_none(*pgd)) - goto no_pgd; - if (pgd_bad(*pgd)) - goto bad_pgd; - - pmd = pmd_offset(pgd, address); - if (pmd_none(*pmd)) - goto no_pmd; - if (pmd_bad(*pmd)) - goto bad_pmd; - - pte = pte_offset_map(pmd, address); - entry = *pte; - /* * If this page is present, it's actually being shared. 
*/ @@ -74,10 +58,38 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) (pfn << PAGE_SHIFT) + PAGE_SIZE); pte_val(entry) &= ~L_PTE_MT_MASK; pte_val(entry) |= shared_pte_mask; - set_pte_at(vma->vm_mm, address, pte, entry); + set_pte_at(vma->vm_mm, address, ptep, entry); flush_tlb_page(vma, address); } + + return ret; +} + +static int adjust_pte(struct vm_area_struct *vma, unsigned long address) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int ret; + + pgd = pgd_offset(vma->vm_mm, address); + if (pgd_none(*pgd)) + goto no_pgd; + if (pgd_bad(*pgd)) + goto bad_pgd; + + pmd = pmd_offset(pgd, address); + if (pmd_none(*pmd)) + goto no_pmd; + if (pmd_bad(*pmd)) + goto bad_pmd; + + pte = pte_offset_map(pmd, address); + + ret = do_adjust_pte(vma, address, pte); + pte_unmap(pte); + return ret; bad_pgd: -- cgit v1.1 From f8a85f1164a33e3eb5b421b137ced793ed53ee33 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:23:44 +0000 Subject: ARM: make_coherent: convert adjust_pte() to use p*d_none_or_clear_bad() Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 074e6bb5..7a8efe1 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -73,16 +73,12 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) int ret; pgd = pgd_offset(vma->vm_mm, address); - if (pgd_none(*pgd)) - goto no_pgd; - if (pgd_bad(*pgd)) - goto bad_pgd; + if (pgd_none_or_clear_bad(pgd)) + return 0; pmd = pmd_offset(pgd, address); - if (pmd_none(*pmd)) - goto no_pmd; - if (pmd_bad(*pmd)) - goto bad_pmd; + if (pmd_none_or_clear_bad(pmd)) + return 0; pte = pte_offset_map(pmd, address); @@ -91,18 +87,6 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) pte_unmap(pte); return ret; - -bad_pgd: - pgd_ERROR(*pgd); - pgd_clear(pgd); -no_pgd: - return 0; - -bad_pmd: - pmd_ERROR(*pmd); - pmd_clear(pmd); -no_pmd: - return 0; } static void -- cgit v1.1 From 56dd47098abe1fdde598a8d8b7c04d775506f456 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:24:34 +0000 Subject: ARM: make_coherent: fix problems with highpte, part 1 update_mmu_cache() is called with a page table already mapped. We call make_coherent(), which then calls adjust_pte() which wants to map other page tables. This causes kmap_atomic() to BUG() because the slot its trying to use is already taken. Since do_adjust_pte() modifies the page tables, we are also missing any form of locking, so we're risking corrupting the page tables. Fix this by using pte_offset_map_nested(), and taking the pte page table lock around do_adjust_pte(). 
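The resulting map/lock sequence in adjust_pte() is, in outline (this mirrors the hunk below; with CONFIG_HIGHPTE, pte_offset_map_nested() uses the second atomic-kmap PTE slot, so it can nest inside the page table mapping that update_mmu_cache()'s caller already holds, and pte_lockptr() returns the same lock that pte_offset_map_lock() would take):

	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map_nested(pmd, address);	/* KM_PTE1, not KM_PTE0 */
	spin_lock(ptl);

	ret = do_adjust_pte(vma, address, pte);

	spin_unlock(ptl);
	pte_unmap_nested(pte);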
Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 7a8efe1..8e9bc51 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -67,6 +67,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, static int adjust_pte(struct vm_area_struct *vma, unsigned long address) { + spinlock_t *ptl; pgd_t *pgd; pmd_t *pmd; pte_t *pte; @@ -80,11 +81,19 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) if (pmd_none_or_clear_bad(pmd)) return 0; - pte = pte_offset_map(pmd, address); + /* + * This is called while another page table is mapped, so we + * must use the nested version. This also means we need to + * open-code the spin-locking. + */ + ptl = pte_lockptr(vma->vm_mm, pmd); + pte = pte_offset_map_nested(pmd, address); + spin_lock(ptl); ret = do_adjust_pte(vma, address, pte); - pte_unmap(pte); + spin_unlock(ptl); + pte_unmap_nested(pte); return ret; } -- cgit v1.1 From ed42acaef1a9d51631a31b55e9ed52d400430492 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:31:38 +0000 Subject: ARM: make_coherent: avoid recalculating the pfn for the modified page We already know the pfn for the page to be modified in make_coherent, so let's stop recalculating it unnecessarily. Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 8e9bc51..ae88f2c 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -37,7 +37,7 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. */ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, - pte_t *ptep) + unsigned long pfn, pte_t *ptep) { pte_t entry = *ptep; int ret; @@ -52,7 +52,6 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, * fault (ie, is old), we can safely ignore any issues. 
*/ if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { - unsigned long pfn = pte_pfn(entry); flush_cache_page(vma, address, pfn); outer_flush_range((pfn << PAGE_SHIFT), (pfn << PAGE_SHIFT) + PAGE_SIZE); @@ -65,7 +64,8 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, return ret; } -static int adjust_pte(struct vm_area_struct *vma, unsigned long address) +static int adjust_pte(struct vm_area_struct *vma, unsigned long address, + unsigned long pfn) { spinlock_t *ptl; pgd_t *pgd; @@ -90,7 +90,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) pte = pte_offset_map_nested(pmd, address); spin_lock(ptl); - ret = do_adjust_pte(vma, address, pte); + ret = do_adjust_pte(vma, address, pfn, pte); spin_unlock(ptl); pte_unmap_nested(pte); @@ -127,11 +127,11 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne if (!(mpnt->vm_flags & VM_MAYSHARE)) continue; offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; - aliases += adjust_pte(mpnt, mpnt->vm_start + offset); + aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn); } flush_dcache_mmap_unlock(mapping); if (aliases) - adjust_pte(vma, addr); + adjust_pte(vma, addr, pfn); else flush_cache_page(vma, addr, pfn); } -- cgit v1.1 From 7ada189f5c8627662c23f49b3e68463f86fc511e Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Tue, 2 Feb 2010 20:24:58 +0100 Subject: ARM: 5900/2: arm: enable support for software perf events The perf events subsystem allows counting of both hardware and software events. This patch implements the bare minimum for software performance events. Cc: Peter Zijlstra Cc: Ingo Molnar Signed-off-by: Jamie Iles Signed-off-by: Russell King --- arch/arm/mm/fault.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 10e0680..9d40c34 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -302,6 +303,12 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) fault = __do_page_fault(mm, addr, fsr, tsk); up_read(&mm->mmap_sem); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); + if (fault & VM_FAULT_MAJOR) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); + else if (fault & VM_FAULT_MINOR) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); + /* * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR */ -- cgit v1.1 From 18eabe2347ae7a11b3db768695913724166dfb0e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 31 Oct 2009 16:52:16 +0000 Subject: ARM: dma-mapping: introduce the idea of buffer ownership The DMA API has the notion of buffer ownership; make it explicit in the ARM implementation of this API. This gives us a set of hooks to allow us to deal with CPU cache issues arising from non-cache coherent DMA. 
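As a concrete illustration of the ownership rule from a driver's point of view (this sketch uses only the generic streaming DMA API; the cpu_to_dev/dev_to_cpu hooks added by this series sit underneath these calls):

	/* CPU owns buf: it may be read and written freely. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* Device owns buf: the map path did whatever cache maintenance the
	 * direction requires (e.g. invalidating stale lines for FROM_DEVICE);
	 * the CPU must not touch the buffer until it is unmapped. */

	/* ... device performs the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);

	/* CPU owns buf again: on non-coherent CPUs the unmap path is where
	 * the device's writes are made visible to the CPU. */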
Signed-off-by: Russell King Tested-By: Santosh Shilimkar Tested-By: Jamie Iles --- arch/arm/mm/dma-mapping.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 26325cb..a316c94 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -573,8 +573,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int i; for_each_sg(sg, s, nents, i) { - dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, - sg_dma_len(s), dir); + if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, + sg_dma_len(s), dir)) + continue; + + __dma_page_dev_to_cpu(sg_page(s), s->offset, + s->length, dir); } } EXPORT_SYMBOL(dma_sync_sg_for_cpu); @@ -597,9 +601,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, sg_dma_len(s), dir)) continue; - if (!arch_is_coherent()) - dma_cache_maint_page(sg_page(s), s->offset, - s->length, dir); + __dma_page_cpu_to_dev(sg_page(s), s->offset, + s->length, dir); } } EXPORT_SYMBOL(dma_sync_sg_for_device); -- cgit v1.1 From 4ea0d7371e808628d11154b0d44140b70f05b998 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 24 Nov 2009 16:27:17 +0000 Subject: ARM: dma-mapping: push buffer ownership down into dma-mapping.c Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/mm/dma-mapping.c | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index a316c94..bbf8788 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -404,7 +404,7 @@ EXPORT_SYMBOL(dma_free_coherent); * platforms with CONFIG_DMABOUNCE. * Use the driver DMA support - see dma-mapping.h (dma_sync_*) */ -void dma_cache_maint(const void *start, size_t size, int direction) +static void dma_cache_maint(const void *start, size_t size, int direction) { void (*inner_op)(const void *, const void *); void (*outer_op)(unsigned long, unsigned long); @@ -431,7 +431,20 @@ void dma_cache_maint(const void *start, size_t size, int direction) inner_op(start, start + size); outer_op(__pa(start), __pa(start) + size); } -EXPORT_SYMBOL(dma_cache_maint); + +void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + dma_cache_maint(kaddr, size, dir); +} +EXPORT_SYMBOL(___dma_single_cpu_to_dev); + +void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + /* nothing to do */ +} +EXPORT_SYMBOL(___dma_single_dev_to_cpu); static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, size_t size, int direction) @@ -474,7 +487,7 @@ static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, outer_op(paddr, paddr + size); } -void dma_cache_maint_page(struct page *page, unsigned long offset, +static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, int dir) { /* @@ -499,7 +512,20 @@ void dma_cache_maint_page(struct page *page, unsigned long offset, left -= len; } while (left); } -EXPORT_SYMBOL(dma_cache_maint_page); + +void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + dma_cache_maint_page(page, off, size, dir); +} +EXPORT_SYMBOL(___dma_page_cpu_to_dev); + +void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + /* nothing to do */ +} 
+EXPORT_SYMBOL(___dma_page_dev_to_cpu); /** * dma_map_sg - map a set of SG buffers for streaming mode DMA -- cgit v1.1 From 65af191a0414d0e1145f67c153e1b63d122dfbb4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 24 Nov 2009 17:53:33 +0000 Subject: ARM: dma-mapping: move selection of page ops out of dma_cache_maint_contiguous Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/mm/dma-mapping.c | 59 ++++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 29 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index bbf8788..77dc483 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -447,48 +447,25 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, EXPORT_SYMBOL(___dma_single_dev_to_cpu); static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, - size_t size, int direction) + size_t size, void (*op)(const void *, const void *)) { void *vaddr; - unsigned long paddr; - void (*inner_op)(const void *, const void *); - void (*outer_op)(unsigned long, unsigned long); - - switch (direction) { - case DMA_FROM_DEVICE: /* invalidate only */ - inner_op = dmac_inv_range; - outer_op = outer_inv_range; - break; - case DMA_TO_DEVICE: /* writeback only */ - inner_op = dmac_clean_range; - outer_op = outer_clean_range; - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - inner_op = dmac_flush_range; - outer_op = outer_flush_range; - break; - default: - BUG(); - } if (!PageHighMem(page)) { vaddr = page_address(page) + offset; - inner_op(vaddr, vaddr + size); + op(vaddr, vaddr + size); } else { vaddr = kmap_high_get(page); if (vaddr) { vaddr += offset; - inner_op(vaddr, vaddr + size); + op(vaddr, vaddr + size); kunmap_high(page); } } - - paddr = page_to_phys(page) + offset; - outer_op(paddr, paddr + size); } static void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, int dir) + size_t size, void (*op)(const void *, const void *)) { /* * A single sg entry may refer to multiple physically contiguous @@ -506,7 +483,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, } len = PAGE_SIZE - offset; } - dma_cache_maint_contiguous(page, offset, len, dir); + dma_cache_maint_contiguous(page, offset, len, op); offset = 0; page++; left -= len; @@ -516,7 +493,31 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { - dma_cache_maint_page(page, off, size, dir); + unsigned long paddr; + void (*inner_op)(const void *, const void *); + void (*outer_op)(unsigned long, unsigned long); + + switch (direction) { + case DMA_FROM_DEVICE: /* invalidate only */ + inner_op = dmac_inv_range; + outer_op = outer_inv_range; + break; + case DMA_TO_DEVICE: /* writeback only */ + inner_op = dmac_clean_range; + outer_op = outer_clean_range; + break; + case DMA_BIDIRECTIONAL: /* writeback and invalidate */ + inner_op = dmac_flush_range; + outer_op = outer_flush_range; + break; + default: + BUG(); + } + + dma_cache_maint_page(page, off, size, inner_op); + + paddr = page_to_phys(page) + off; + outer_op(paddr, paddr + size); } EXPORT_SYMBOL(___dma_page_cpu_to_dev); -- cgit v1.1 From 93f1d629e22b08642eb713ad96ac2cb9ade0641c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 24 Nov 2009 14:41:01 +0000 Subject: ARM: dma-mapping: simplify dma_cache_maint_page dma_cache_maint_contiguous is 
now simple enough to live inside dma_cache_maint_page, so move it there. Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/mm/dma-mapping.c | 42 ++++++++++++++++++------------------------ 1 file changed, 18 insertions(+), 24 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 77dc483..0d68d2c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -446,24 +446,6 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, } EXPORT_SYMBOL(___dma_single_dev_to_cpu); -static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, - size_t size, void (*op)(const void *, const void *)) -{ - void *vaddr; - - if (!PageHighMem(page)) { - vaddr = page_address(page) + offset; - op(vaddr, vaddr + size); - } else { - vaddr = kmap_high_get(page); - if (vaddr) { - vaddr += offset; - op(vaddr, vaddr + size); - kunmap_high(page); - } - } -} - static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, void (*op)(const void *, const void *)) { @@ -476,14 +458,26 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t left = size; do { size_t len = left; - if (PageHighMem(page) && len + offset > PAGE_SIZE) { - if (offset >= PAGE_SIZE) { - page += offset / PAGE_SIZE; - offset %= PAGE_SIZE; + void *vaddr; + + if (PageHighMem(page)) { + if (len + offset > PAGE_SIZE) { + if (offset >= PAGE_SIZE) { + page += offset / PAGE_SIZE; + offset %= PAGE_SIZE; + } + len = PAGE_SIZE - offset; + } + vaddr = kmap_high_get(page); + if (vaddr) { + vaddr += offset; + op(vaddr, vaddr + len); + kunmap_high(page); } - len = PAGE_SIZE - offset; + } else { + vaddr = page_address(page) + offset; + op(vaddr, vaddr + len); } - dma_cache_maint_contiguous(page, offset, len, op); offset = 0; page++; left -= len; -- cgit v1.1 From a9c9147eb9b1dba0ce567a41897c7773b4d1b0bc Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 26 Nov 2009 16:19:58 +0000 Subject: ARM: dma-mapping: provide per-cpu type map/unmap functions Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/mm/cache-fa.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/cache-v3.S | 24 ++++++++++++++++++++++++ arch/arm/mm/cache-v4.S | 24 ++++++++++++++++++++++++ arch/arm/mm/cache-v4wb.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/cache-v4wt.S | 25 +++++++++++++++++++++++++ arch/arm/mm/cache-v6.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/cache-v7.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/dma-mapping.c | 29 ++++++++++++----------------- arch/arm/mm/proc-arm1020.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-arm1020e.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-arm1022.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-arm1026.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-arm920.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-arm922.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-arm925.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-arm926.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-arm940.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-arm946.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-feroceon.S | 42 ++++++++++++++++++++++++++++++++++++++++++ arch/arm/mm/proc-mohawk.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-xsc3.S | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-xscale.S | 41 +++++++++++++++++++++++++++++++++++++++++ 22 files changed, 584 insertions(+), 17 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-fa.S 
b/arch/arm/mm/cache-fa.S index a89444a..8ebffdd 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S @@ -205,6 +205,30 @@ ENTRY(fa_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(fa_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq fa_dma_clean_range + bcs fa_dma_inv_range + b fa_dma_flush_range +ENDPROC(fa_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(fa_dma_unmap_area) + mov pc, lr +ENDPROC(fa_dma_unmap_area) + __INITDATA .type fa_cache_fns, #object @@ -215,6 +239,8 @@ ENTRY(fa_cache_fns) .long fa_coherent_kern_range .long fa_coherent_user_range .long fa_flush_kern_dcache_area + .long fa_dma_map_area + .long fa_dma_unmap_area .long fa_dma_inv_range .long fa_dma_clean_range .long fa_dma_flush_range diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 2a48273..6df52dc 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -121,6 +121,28 @@ ENTRY(v3_dma_flush_range) ENTRY(v3_dma_clean_range) mov pc, lr +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v3_dma_unmap_area) + teq r2, #DMA_TO_DEVICE + bne v3_dma_inv_range + /* FALLTHROUGH */ + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v3_dma_map_area) + mov pc, lr +ENDPROC(v3_dma_unmap_area) +ENDPROC(v3_dma_map_area) + __INITDATA .type v3_cache_fns, #object @@ -131,6 +153,8 @@ ENTRY(v3_cache_fns) .long v3_coherent_kern_range .long v3_coherent_user_range .long v3_flush_kern_dcache_area + .long v3_dma_map_area + .long v3_dma_unmap_area .long v3_dma_inv_range .long v3_dma_clean_range .long v3_dma_flush_range diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 5c7da3e..df3b423 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -133,6 +133,28 @@ ENTRY(v4_dma_flush_range) ENTRY(v4_dma_clean_range) mov pc, lr +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4_dma_unmap_area) + teq r2, #DMA_TO_DEVICE + bne v4_dma_inv_range + /* FALLTHROUGH */ + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4_dma_map_area) + mov pc, lr +ENDPROC(v4_dma_unmap_area) +ENDPROC(v4_dma_map_area) + __INITDATA .type v4_cache_fns, #object @@ -143,6 +165,8 @@ ENTRY(v4_cache_fns) .long v4_coherent_kern_range .long v4_coherent_user_range .long v4_flush_kern_dcache_area + .long v4_dma_map_area + .long v4_dma_unmap_area .long v4_dma_inv_range .long v4_dma_clean_range .long v4_dma_flush_range diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index 3dbedf1e..32e7a74 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -216,6 +216,30 @@ ENTRY(v4wb_dma_clean_range) .globl v4wb_dma_flush_range .set v4wb_dma_flush_range, v4wb_coherent_kern_range +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wb_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq v4wb_dma_clean_range + bcs 
v4wb_dma_inv_range + b v4wb_dma_flush_range +ENDPROC(v4wb_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wb_dma_unmap_area) + mov pc, lr +ENDPROC(v4wb_dma_unmap_area) + __INITDATA .type v4wb_cache_fns, #object @@ -226,6 +250,8 @@ ENTRY(v4wb_cache_fns) .long v4wb_coherent_kern_range .long v4wb_coherent_user_range .long v4wb_flush_kern_dcache_area + .long v4wb_dma_map_area + .long v4wb_dma_unmap_area .long v4wb_dma_inv_range .long v4wb_dma_clean_range .long v4wb_dma_flush_range diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index b3b7410..3d8dad5 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -172,6 +172,29 @@ ENTRY(v4wt_dma_clean_range) .globl v4wt_dma_flush_range .equ v4wt_dma_flush_range, v4wt_dma_inv_range +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wt_dma_unmap_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + bne v4wt_dma_inv_range + /* FALLTHROUGH */ + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wt_dma_map_area) + mov pc, lr +ENDPROC(v4wt_dma_unmap_area) +ENDPROC(v4wt_dma_map_area) + __INITDATA .type v4wt_cache_fns, #object @@ -182,6 +205,8 @@ ENTRY(v4wt_cache_fns) .long v4wt_coherent_kern_range .long v4wt_coherent_user_range .long v4wt_flush_kern_dcache_area + .long v4wt_dma_map_area + .long v4wt_dma_unmap_area .long v4wt_dma_inv_range .long v4wt_dma_clean_range .long v4wt_dma_flush_range diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 4ba0a24..6f926dd 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -263,6 +263,30 @@ ENTRY(v6_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v6_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq v6_dma_clean_range + bcs v6_dma_inv_range + b v6_dma_flush_range +ENDPROC(v6_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v6_dma_unmap_area) + mov pc, lr +ENDPROC(v6_dma_unmap_area) + __INITDATA .type v6_cache_fns, #object @@ -273,6 +297,8 @@ ENTRY(v6_cache_fns) .long v6_coherent_kern_range .long v6_coherent_user_range .long v6_flush_kern_dcache_area + .long v6_dma_map_area + .long v6_dma_unmap_area .long v6_dma_inv_range .long v6_dma_clean_range .long v6_dma_flush_range diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 9073db8..e30d8bc 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -271,6 +271,30 @@ ENTRY(v7_dma_flush_range) mov pc, lr ENDPROC(v7_dma_flush_range) +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v7_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq v7_dma_clean_range + bcs v7_dma_inv_range + b v7_dma_flush_range +ENDPROC(v7_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v7_dma_unmap_area) + mov pc, lr +ENDPROC(v7_dma_unmap_area) + __INITDATA .type v7_cache_fns, #object @@ 
-281,6 +305,8 @@ ENTRY(v7_cache_fns) .long v7_coherent_kern_range .long v7_coherent_user_range .long v7_flush_kern_dcache_area + .long v7_dma_map_area + .long v7_dma_unmap_area .long v7_dma_inv_range .long v7_dma_clean_range .long v7_dma_flush_range diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 0d68d2c..efa8efa3 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -406,35 +406,31 @@ EXPORT_SYMBOL(dma_free_coherent); */ static void dma_cache_maint(const void *start, size_t size, int direction) { - void (*inner_op)(const void *, const void *); void (*outer_op)(unsigned long, unsigned long); - BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1)); - switch (direction) { case DMA_FROM_DEVICE: /* invalidate only */ - inner_op = dmac_inv_range; outer_op = outer_inv_range; break; case DMA_TO_DEVICE: /* writeback only */ - inner_op = dmac_clean_range; outer_op = outer_clean_range; break; case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - inner_op = dmac_flush_range; outer_op = outer_flush_range; break; default: BUG(); } - inner_op(start, start + size); outer_op(__pa(start), __pa(start) + size); } void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, enum dma_data_direction dir) { + BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); + + dmac_map_area(kaddr, size, dir); dma_cache_maint(kaddr, size, dir); } EXPORT_SYMBOL(___dma_single_cpu_to_dev); @@ -442,12 +438,15 @@ EXPORT_SYMBOL(___dma_single_cpu_to_dev); void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); + + dmac_unmap_area(kaddr, size, dir); } EXPORT_SYMBOL(___dma_single_dev_to_cpu); static void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, void (*op)(const void *, const void *)) + size_t size, enum dma_data_direction dir, + void (*op)(const void *, size_t, int)) { /* * A single sg entry may refer to multiple physically contiguous @@ -471,12 +470,12 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, vaddr = kmap_high_get(page); if (vaddr) { vaddr += offset; - op(vaddr, vaddr + len); + op(vaddr, len, dir); kunmap_high(page); } } else { vaddr = page_address(page) + offset; - op(vaddr, vaddr + len); + op(vaddr, len, dir); } offset = 0; page++; @@ -488,27 +487,23 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { unsigned long paddr; - void (*inner_op)(const void *, const void *); void (*outer_op)(unsigned long, unsigned long); switch (direction) { case DMA_FROM_DEVICE: /* invalidate only */ - inner_op = dmac_inv_range; outer_op = outer_inv_range; break; case DMA_TO_DEVICE: /* writeback only */ - inner_op = dmac_clean_range; outer_op = outer_clean_range; break; case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - inner_op = dmac_flush_range; outer_op = outer_flush_range; break; default: BUG(); } - dma_cache_maint_page(page, off, size, inner_op); + dma_cache_maint_page(page, off, size, dir, dmac_map_area); paddr = page_to_phys(page) + off; outer_op(paddr, paddr + size); @@ -518,7 +513,7 @@ EXPORT_SYMBOL(___dma_page_cpu_to_dev); void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); } EXPORT_SYMBOL(___dma_page_dev_to_cpu); diff --git a/arch/arm/mm/proc-arm1020.S 
b/arch/arm/mm/proc-arm1020.S index 8012e24..c85f5eb 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -330,6 +330,30 @@ ENTRY(arm1020_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1020_dma_clean_range + bcs arm1020_dma_inv_range + b arm1020_dma_flush_range +ENDPROC(arm1020_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020_dma_unmap_area) + mov pc, lr +ENDPROC(arm1020_dma_unmap_area) + ENTRY(arm1020_cache_fns) .long arm1020_flush_kern_cache_all .long arm1020_flush_user_cache_all @@ -337,6 +361,8 @@ ENTRY(arm1020_cache_fns) .long arm1020_coherent_kern_range .long arm1020_coherent_user_range .long arm1020_flush_kern_dcache_area + .long arm1020_dma_map_area + .long arm1020_dma_unmap_area .long arm1020_dma_inv_range .long arm1020_dma_clean_range .long arm1020_dma_flush_range diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 41fe25d..5a3cf76 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -316,6 +316,30 @@ ENTRY(arm1020e_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020e_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1020e_dma_clean_range + bcs arm1020e_dma_inv_range + b arm1020e_dma_flush_range +ENDPROC(arm1020e_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020e_dma_unmap_area) + mov pc, lr +ENDPROC(arm1020e_dma_unmap_area) + ENTRY(arm1020e_cache_fns) .long arm1020e_flush_kern_cache_all .long arm1020e_flush_user_cache_all @@ -323,6 +347,8 @@ ENTRY(arm1020e_cache_fns) .long arm1020e_coherent_kern_range .long arm1020e_coherent_user_range .long arm1020e_flush_kern_dcache_area + .long arm1020e_dma_map_area + .long arm1020e_dma_unmap_area .long arm1020e_dma_inv_range .long arm1020e_dma_clean_range .long arm1020e_dma_flush_range diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 20a5b1b..fec8f58 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -305,6 +305,30 @@ ENTRY(arm1022_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1022_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1022_dma_clean_range + bcs arm1022_dma_inv_range + b arm1022_dma_flush_range +ENDPROC(arm1022_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1022_dma_unmap_area) + mov pc, lr +ENDPROC(arm1022_dma_unmap_area) + ENTRY(arm1022_cache_fns) .long arm1022_flush_kern_cache_all .long arm1022_flush_user_cache_all @@ -312,6 +336,8 @@ ENTRY(arm1022_cache_fns) .long arm1022_coherent_kern_range .long arm1022_coherent_user_range .long arm1022_flush_kern_dcache_area + .long arm1022_dma_map_area + .long arm1022_dma_unmap_area .long 
arm1022_dma_inv_range .long arm1022_dma_clean_range .long arm1022_dma_flush_range diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 96aedb1..9ece6f6 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -299,6 +299,30 @@ ENTRY(arm1026_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1026_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1026_dma_clean_range + bcs arm1026_dma_inv_range + b arm1026_dma_flush_range +ENDPROC(arm1026_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1026_dma_unmap_area) + mov pc, lr +ENDPROC(arm1026_dma_unmap_area) + ENTRY(arm1026_cache_fns) .long arm1026_flush_kern_cache_all .long arm1026_flush_user_cache_all @@ -306,6 +330,8 @@ ENTRY(arm1026_cache_fns) .long arm1026_coherent_kern_range .long arm1026_coherent_user_range .long arm1026_flush_kern_dcache_area + .long arm1026_dma_map_area + .long arm1026_dma_unmap_area .long arm1026_dma_inv_range .long arm1026_dma_clean_range .long arm1026_dma_flush_range diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 471669e..6f6ab27 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -288,6 +288,30 @@ ENTRY(arm920_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm920_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm920_dma_clean_range + bcs arm920_dma_inv_range + b arm920_dma_flush_range +ENDPROC(arm920_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm920_dma_unmap_area) + mov pc, lr +ENDPROC(arm920_dma_unmap_area) + ENTRY(arm920_cache_fns) .long arm920_flush_kern_cache_all .long arm920_flush_user_cache_all @@ -295,6 +319,8 @@ ENTRY(arm920_cache_fns) .long arm920_coherent_kern_range .long arm920_coherent_user_range .long arm920_flush_kern_dcache_area + .long arm920_dma_map_area + .long arm920_dma_unmap_area .long arm920_dma_inv_range .long arm920_dma_clean_range .long arm920_dma_flush_range diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index ee111b0..4e4396b 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -290,6 +290,30 @@ ENTRY(arm922_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm922_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm922_dma_clean_range + bcs arm922_dma_inv_range + b arm922_dma_flush_range +ENDPROC(arm922_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm922_dma_unmap_area) + mov pc, lr +ENDPROC(arm922_dma_unmap_area) + ENTRY(arm922_cache_fns) .long arm922_flush_kern_cache_all .long arm922_flush_user_cache_all @@ -297,6 +321,8 @@ ENTRY(arm922_cache_fns) .long arm922_coherent_kern_range .long arm922_coherent_user_range .long arm922_flush_kern_dcache_area + .long arm922_dma_map_area 
+ .long arm922_dma_unmap_area .long arm922_dma_inv_range .long arm922_dma_clean_range .long arm922_dma_flush_range diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 8deb5bd..7c01c5d 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -341,6 +341,30 @@ ENTRY(arm925_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm925_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm925_dma_clean_range + bcs arm925_dma_inv_range + b arm925_dma_flush_range +ENDPROC(arm925_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm925_dma_unmap_area) + mov pc, lr +ENDPROC(arm925_dma_unmap_area) + ENTRY(arm925_cache_fns) .long arm925_flush_kern_cache_all .long arm925_flush_user_cache_all @@ -348,6 +372,8 @@ ENTRY(arm925_cache_fns) .long arm925_coherent_kern_range .long arm925_coherent_user_range .long arm925_flush_kern_dcache_area + .long arm925_dma_map_area + .long arm925_dma_unmap_area .long arm925_dma_inv_range .long arm925_dma_clean_range .long arm925_dma_flush_range diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 64db6e2..72a01a4 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -304,6 +304,30 @@ ENTRY(arm926_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm926_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm926_dma_clean_range + bcs arm926_dma_inv_range + b arm926_dma_flush_range +ENDPROC(arm926_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm926_dma_unmap_area) + mov pc, lr +ENDPROC(arm926_dma_unmap_area) + ENTRY(arm926_cache_fns) .long arm926_flush_kern_cache_all .long arm926_flush_user_cache_all @@ -311,6 +335,8 @@ ENTRY(arm926_cache_fns) .long arm926_coherent_kern_range .long arm926_coherent_user_range .long arm926_flush_kern_dcache_area + .long arm926_dma_map_area + .long arm926_dma_unmap_area .long arm926_dma_inv_range .long arm926_dma_clean_range .long arm926_dma_flush_range diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 8196b9f..6bb58fc 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -233,6 +233,30 @@ ENTRY(arm940_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm940_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm940_dma_clean_range + bcs arm940_dma_inv_range + b arm940_dma_flush_range +ENDPROC(arm940_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm940_dma_unmap_area) + mov pc, lr +ENDPROC(arm940_dma_unmap_area) + ENTRY(arm940_cache_fns) .long arm940_flush_kern_cache_all .long arm940_flush_user_cache_all @@ -240,6 +264,8 @@ ENTRY(arm940_cache_fns) .long arm940_coherent_kern_range .long arm940_coherent_user_range .long arm940_flush_kern_dcache_area + .long 
arm940_dma_map_area + .long arm940_dma_unmap_area .long arm940_dma_inv_range .long arm940_dma_clean_range .long arm940_dma_flush_range diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 9a95123..ac0f9ba 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -275,6 +275,30 @@ ENTRY(arm946_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm946_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm946_dma_clean_range + bcs arm946_dma_inv_range + b arm946_dma_flush_range +ENDPROC(arm946_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm946_dma_unmap_area) + mov pc, lr +ENDPROC(arm946_dma_unmap_area) + ENTRY(arm946_cache_fns) .long arm946_flush_kern_cache_all .long arm946_flush_user_cache_all @@ -282,6 +306,8 @@ ENTRY(arm946_cache_fns) .long arm946_coherent_kern_range .long arm946_coherent_user_range .long arm946_flush_kern_dcache_area + .long arm946_dma_map_area + .long arm946_dma_unmap_area .long arm946_dma_inv_range .long arm946_dma_clean_range .long arm946_dma_flush_range diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index dbc3938..97e1d78 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -367,6 +367,44 @@ ENTRY(feroceon_range_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq feroceon_dma_clean_range + bcs feroceon_dma_inv_range + b feroceon_dma_flush_range +ENDPROC(feroceon_dma_map_area) + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_range_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq feroceon_range_dma_clean_range + bcs feroceon_range_dma_inv_range + b feroceon_range_dma_flush_range +ENDPROC(feroceon_range_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_dma_unmap_area) + mov pc, lr +ENDPROC(feroceon_dma_unmap_area) + ENTRY(feroceon_cache_fns) .long feroceon_flush_kern_cache_all .long feroceon_flush_user_cache_all @@ -374,6 +412,8 @@ ENTRY(feroceon_cache_fns) .long feroceon_coherent_kern_range .long feroceon_coherent_user_range .long feroceon_flush_kern_dcache_area + .long feroceon_dma_map_area + .long feroceon_dma_unmap_area .long feroceon_dma_inv_range .long feroceon_dma_clean_range .long feroceon_dma_flush_range @@ -385,6 +425,8 @@ ENTRY(feroceon_range_cache_fns) .long feroceon_coherent_kern_range .long feroceon_coherent_user_range .long feroceon_range_flush_kern_dcache_area + .long feroceon_range_dma_map_area + .long feroceon_dma_unmap_area .long feroceon_range_dma_inv_range .long feroceon_range_dma_clean_range .long feroceon_range_dma_flush_range diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 9674d36..55b7fbe 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -268,6 +268,30 @@ ENTRY(mohawk_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * 
dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(mohawk_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq mohawk_dma_clean_range + bcs mohawk_dma_inv_range + b mohawk_dma_flush_range +ENDPROC(mohawk_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(mohawk_dma_unmap_area) + mov pc, lr +ENDPROC(mohawk_dma_unmap_area) + ENTRY(mohawk_cache_fns) .long mohawk_flush_kern_cache_all .long mohawk_flush_user_cache_all @@ -275,6 +299,8 @@ ENTRY(mohawk_cache_fns) .long mohawk_coherent_kern_range .long mohawk_coherent_user_range .long mohawk_flush_kern_dcache_area + .long mohawk_dma_map_area + .long mohawk_dma_unmap_area .long mohawk_dma_inv_range .long mohawk_dma_clean_range .long mohawk_dma_flush_range diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 96456f5..4e4ce88 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -304,6 +304,30 @@ ENTRY(xsc3_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ data write barrier mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xsc3_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq xsc3_dma_clean_range + bcs xsc3_dma_inv_range + b xsc3_dma_flush_range +ENDPROC(xsc3_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xsc3_dma_unmap_area) + mov pc, lr +ENDPROC(xsc3_dma_unmap_area) + ENTRY(xsc3_cache_fns) .long xsc3_flush_kern_cache_all .long xsc3_flush_user_cache_all @@ -311,6 +335,8 @@ ENTRY(xsc3_cache_fns) .long xsc3_coherent_kern_range .long xsc3_coherent_user_range .long xsc3_flush_kern_dcache_area + .long xsc3_dma_map_area + .long xsc3_dma_unmap_area .long xsc3_dma_inv_range .long xsc3_dma_clean_range .long xsc3_dma_flush_range diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 93df472..a7999f9 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -363,6 +363,43 @@ ENTRY(xscale_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq xscale_dma_clean_range + bcs xscale_dma_inv_range + b xscale_dma_flush_range +ENDPROC(xscale_dma_map_area) + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_a0_map_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + beq xscale_dma_clean_range + b xscale_dma_flush_range +ENDPROC(xscsale_dma_a0_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_unmap_area) + mov pc, lr +ENDPROC(xscale_dma_unmap_area) + ENTRY(xscale_cache_fns) .long xscale_flush_kern_cache_all .long xscale_flush_user_cache_all @@ -370,6 +407,8 @@ ENTRY(xscale_cache_fns) .long xscale_coherent_kern_range .long xscale_coherent_user_range .long xscale_flush_kern_dcache_area + .long xscale_dma_map_area + .long xscale_dma_unmap_area .long xscale_dma_inv_range .long xscale_dma_clean_range 
.long xscale_dma_flush_range @@ -394,6 +433,8 @@ ENTRY(xscale_80200_A0_A1_cache_fns) .long xscale_coherent_kern_range .long xscale_coherent_user_range .long xscale_flush_kern_dcache_area + .long xscale_dma_a0_map_area + .long xscale_dma_unmap_area .long xscale_dma_flush_range .long xscale_dma_clean_range .long xscale_dma_flush_range -- cgit v1.1 From 702b94bff3c50542a6e4ab9a4f4cef093262fe65 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 26 Nov 2009 16:24:19 +0000 Subject: ARM: dma-mapping: remove dmac_clean_range and dmac_inv_range These are now unused, and so can be removed. Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/mm/cache-fa.S | 6 ++---- arch/arm/mm/cache-v3.S | 29 +---------------------------- arch/arm/mm/cache-v4.S | 29 +---------------------------- arch/arm/mm/cache-v4wb.S | 6 ++---- arch/arm/mm/cache-v4wt.S | 15 +-------------- arch/arm/mm/cache-v6.S | 6 ++---- arch/arm/mm/cache-v7.S | 6 ++---- arch/arm/mm/proc-arm1020.S | 6 ++---- arch/arm/mm/proc-arm1020e.S | 6 ++---- arch/arm/mm/proc-arm1022.S | 6 ++---- arch/arm/mm/proc-arm1026.S | 6 ++---- arch/arm/mm/proc-arm920.S | 6 ++---- arch/arm/mm/proc-arm922.S | 6 ++---- arch/arm/mm/proc-arm925.S | 6 ++---- arch/arm/mm/proc-arm926.S | 6 ++---- arch/arm/mm/proc-arm940.S | 6 ++---- arch/arm/mm/proc-arm946.S | 6 ++---- arch/arm/mm/proc-feroceon.S | 12 ++++-------- arch/arm/mm/proc-mohawk.S | 6 ++---- arch/arm/mm/proc-xsc3.S | 6 ++---- arch/arm/mm/proc-xscale.S | 8 ++------ 21 files changed, 41 insertions(+), 148 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index 8ebffdd..7148e53 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S @@ -157,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(fa_dma_inv_range) +fa_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry @@ -180,7 +180,7 @@ ENTRY(fa_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(fa_dma_clean_range) +fa_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -241,7 +241,5 @@ ENTRY(fa_cache_fns) .long fa_flush_kern_dcache_area .long fa_dma_map_area .long fa_dma_unmap_area - .long fa_dma_inv_range - .long fa_dma_clean_range .long fa_dma_flush_range .size fa_cache_fns, . - fa_cache_fns diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 6df52dc..c2ff3c5 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -84,20 +84,6 @@ ENTRY(v3_flush_kern_dcache_area) /* FALLTHROUGH */ /* - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_inv_range) - /* FALLTHROUGH */ - -/* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. @@ -108,17 +94,6 @@ ENTRY(v3_dma_inv_range) ENTRY(v3_dma_flush_range) mov r0, #0 mcr p15, 0, r0, c7, c0, 0 @ flush ID cache - /* FALLTHROUGH */ - -/* - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. 
- * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_clean_range) mov pc, lr /* @@ -129,7 +104,7 @@ ENTRY(v3_dma_clean_range) */ ENTRY(v3_dma_unmap_area) teq r2, #DMA_TO_DEVICE - bne v3_dma_inv_range + bne v3_dma_flush_range /* FALLTHROUGH */ /* @@ -155,7 +130,5 @@ ENTRY(v3_cache_fns) .long v3_flush_kern_dcache_area .long v3_dma_map_area .long v3_dma_unmap_area - .long v3_dma_inv_range - .long v3_dma_clean_range .long v3_dma_flush_range .size v3_cache_fns, . - v3_cache_fns diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index df3b423..4810f7e 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -94,20 +94,6 @@ ENTRY(v4_flush_kern_dcache_area) /* FALLTHROUGH */ /* - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4_dma_inv_range) - /* FALLTHROUGH */ - -/* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. @@ -120,17 +106,6 @@ ENTRY(v4_dma_flush_range) mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache #endif - /* FALLTHROUGH */ - -/* - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4_dma_clean_range) mov pc, lr /* @@ -141,7 +116,7 @@ ENTRY(v4_dma_clean_range) */ ENTRY(v4_dma_unmap_area) teq r2, #DMA_TO_DEVICE - bne v4_dma_inv_range + bne v4_dma_flush_range /* FALLTHROUGH */ /* @@ -167,7 +142,5 @@ ENTRY(v4_cache_fns) .long v4_flush_kern_dcache_area .long v4_dma_map_area .long v4_dma_unmap_area - .long v4_dma_inv_range - .long v4_dma_clean_range .long v4_dma_flush_range .size v4_cache_fns, . - v4_cache_fns diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index 32e7a74..df8368a 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -173,7 +173,7 @@ ENTRY(v4wb_coherent_user_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wb_dma_inv_range) +v4wb_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -194,7 +194,7 @@ ENTRY(v4wb_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wb_dma_clean_range) +v4wb_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -252,7 +252,5 @@ ENTRY(v4wb_cache_fns) .long v4wb_flush_kern_dcache_area .long v4wb_dma_map_area .long v4wb_dma_unmap_area - .long v4wb_dma_inv_range - .long v4wb_dma_clean_range .long v4wb_dma_flush_range .size v4wb_cache_fns, . - v4wb_cache_fns diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index 3d8dad5..45c7031 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -142,23 +142,12 @@ ENTRY(v4wt_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wt_dma_inv_range) +v4wt_dma_inv_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b - /* FALLTHROUGH */ - -/* - * dma_clean_range(start, end) - * - * Clean the specified virtual address range. 
- * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4wt_dma_clean_range) mov pc, lr /* @@ -207,7 +196,5 @@ ENTRY(v4wt_cache_fns) .long v4wt_flush_kern_dcache_area .long v4wt_dma_map_area .long v4wt_dma_unmap_area - .long v4wt_dma_inv_range - .long v4wt_dma_clean_range .long v4wt_dma_flush_range .size v4wt_cache_fns, . - v4wt_cache_fns diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 6f926dd..a11934e 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -195,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v6_dma_inv_range) +v6_dma_inv_range: tst r0, #D_CACHE_LINE_SIZE - 1 bic r0, r0, #D_CACHE_LINE_SIZE - 1 #ifdef HARVARD_CACHE @@ -228,7 +228,7 @@ ENTRY(v6_dma_inv_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v6_dma_clean_range) +v6_dma_clean_range: bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: #ifdef HARVARD_CACHE @@ -299,7 +299,5 @@ ENTRY(v6_cache_fns) .long v6_flush_kern_dcache_area .long v6_dma_map_area .long v6_dma_unmap_area - .long v6_dma_inv_range - .long v6_dma_clean_range .long v6_dma_flush_range .size v6_cache_fns, . - v6_cache_fns diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index e30d8bc..b1cd0fd 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -216,7 +216,7 @@ ENDPROC(v7_flush_kern_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v7_dma_inv_range) +v7_dma_inv_range: dcache_line_size r2, r3 sub r3, r2, #1 tst r0, r3 @@ -240,7 +240,7 @@ ENDPROC(v7_dma_inv_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v7_dma_clean_range) +v7_dma_clean_range: dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 @@ -307,7 +307,5 @@ ENTRY(v7_cache_fns) .long v7_flush_kern_dcache_area .long v7_dma_map_area .long v7_dma_unmap_area - .long v7_dma_inv_range - .long v7_dma_clean_range .long v7_dma_flush_range .size v7_cache_fns, . 
- v7_cache_fns diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index c85f5eb..72507c6 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -265,7 +265,7 @@ ENTRY(arm1020_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1020_dma_inv_range) +arm1020_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -295,7 +295,7 @@ ENTRY(arm1020_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1020_dma_clean_range) +arm1020_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -363,8 +363,6 @@ ENTRY(arm1020_cache_fns) .long arm1020_flush_kern_dcache_area .long arm1020_dma_map_area .long arm1020_dma_unmap_area - .long arm1020_dma_inv_range - .long arm1020_dma_clean_range .long arm1020_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 5a3cf76..d278298 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -258,7 +258,7 @@ ENTRY(arm1020e_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1020e_dma_inv_range) +arm1020e_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -284,7 +284,7 @@ ENTRY(arm1020e_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1020e_dma_clean_range) +arm1020e_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -349,8 +349,6 @@ ENTRY(arm1020e_cache_fns) .long arm1020e_flush_kern_dcache_area .long arm1020e_dma_map_area .long arm1020e_dma_unmap_area - .long arm1020e_dma_inv_range - .long arm1020e_dma_clean_range .long arm1020e_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index fec8f58..ce13e4a 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -247,7 +247,7 @@ ENTRY(arm1022_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1022_dma_inv_range) +arm1022_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -273,7 +273,7 @@ ENTRY(arm1022_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1022_dma_clean_range) +arm1022_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -338,8 +338,6 @@ ENTRY(arm1022_cache_fns) .long arm1022_flush_kern_dcache_area .long arm1022_dma_map_area .long arm1022_dma_unmap_area - .long arm1022_dma_inv_range - .long arm1022_dma_clean_range .long arm1022_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 9ece6f6..636672a 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -241,7 +241,7 @@ ENTRY(arm1026_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1026_dma_inv_range) +arm1026_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -267,7 +267,7 @@ ENTRY(arm1026_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1026_dma_clean_range) +arm1026_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -332,8 +332,6 @@ ENTRY(arm1026_cache_fns) .long arm1026_flush_kern_dcache_area .long arm1026_dma_map_area .long arm1026_dma_unmap_area - .long arm1026_dma_inv_range - .long arm1026_dma_clean_range .long arm1026_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 6f6ab27..8be8199 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -239,7 +239,7 @@ ENTRY(arm920_flush_kern_dcache_area) * * (same as v4wb) */ 
-ENTRY(arm920_dma_inv_range) +arm920_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -262,7 +262,7 @@ ENTRY(arm920_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm920_dma_clean_range) +arm920_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -321,8 +321,6 @@ ENTRY(arm920_cache_fns) .long arm920_flush_kern_dcache_area .long arm920_dma_map_area .long arm920_dma_unmap_area - .long arm920_dma_inv_range - .long arm920_dma_clean_range .long arm920_dma_flush_range #endif diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 4e4396b..c0ff8e4 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -241,7 +241,7 @@ ENTRY(arm922_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm922_dma_inv_range) +arm922_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -264,7 +264,7 @@ ENTRY(arm922_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm922_dma_clean_range) +arm922_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -323,8 +323,6 @@ ENTRY(arm922_cache_fns) .long arm922_flush_kern_dcache_area .long arm922_dma_map_area .long arm922_dma_unmap_area - .long arm922_dma_inv_range - .long arm922_dma_clean_range .long arm922_dma_flush_range #endif diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 7c01c5d..3c6cffe 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -283,7 +283,7 @@ ENTRY(arm925_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm925_dma_inv_range) +arm925_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -308,7 +308,7 @@ ENTRY(arm925_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm925_dma_clean_range) +arm925_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -374,8 +374,6 @@ ENTRY(arm925_cache_fns) .long arm925_flush_kern_dcache_area .long arm925_dma_map_area .long arm925_dma_unmap_area - .long arm925_dma_inv_range - .long arm925_dma_clean_range .long arm925_dma_flush_range ENTRY(cpu_arm925_dcache_clean_area) diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 72a01a4..75b707c 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -246,7 +246,7 @@ ENTRY(arm926_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm926_dma_inv_range) +arm926_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -271,7 +271,7 @@ ENTRY(arm926_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm926_dma_clean_range) +arm926_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -337,8 +337,6 @@ ENTRY(arm926_cache_fns) .long arm926_flush_kern_dcache_area .long arm926_dma_map_area .long arm926_dma_unmap_area - .long arm926_dma_inv_range - .long arm926_dma_clean_range .long arm926_dma_flush_range ENTRY(cpu_arm926_dcache_clean_area) diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 6bb58fc..1af1657 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -171,7 +171,7 @@ ENTRY(arm940_flush_kern_dcache_area) * - start - virtual start address 
* - end - virtual end address */ -ENTRY(arm940_dma_inv_range) +arm940_dma_inv_range: mov ip, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries @@ -192,7 +192,7 @@ ENTRY(arm940_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(arm940_dma_clean_range) +arm940_dma_clean_range: ENTRY(cpu_arm940_dcache_clean_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -266,8 +266,6 @@ ENTRY(arm940_cache_fns) .long arm940_flush_kern_dcache_area .long arm940_dma_map_area .long arm940_dma_unmap_area - .long arm940_dma_inv_range - .long arm940_dma_clean_range .long arm940_dma_flush_range __INIT diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index ac0f9ba..1664b6a 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -215,7 +215,7 @@ ENTRY(arm946_flush_kern_dcache_area) * - end - virtual end address * (same as arm926) */ -ENTRY(arm946_dma_inv_range) +arm946_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -240,7 +240,7 @@ ENTRY(arm946_dma_inv_range) * * (same as arm926) */ -ENTRY(arm946_dma_clean_range) +arm946_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -308,8 +308,6 @@ ENTRY(arm946_cache_fns) .long arm946_flush_kern_dcache_area .long arm946_dma_map_area .long arm946_dma_unmap_area - .long arm946_dma_inv_range - .long arm946_dma_clean_range .long arm946_dma_flush_range diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 97e1d78..53e6323 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -274,7 +274,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area) * (same as v4wb) */ .align 5 -ENTRY(feroceon_dma_inv_range) +feroceon_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -288,7 +288,7 @@ ENTRY(feroceon_dma_inv_range) mov pc, lr .align 5 -ENTRY(feroceon_range_dma_inv_range) +feroceon_range_dma_inv_range: mrs r2, cpsr tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -314,7 +314,7 @@ ENTRY(feroceon_range_dma_inv_range) * (same as v4wb) */ .align 5 -ENTRY(feroceon_dma_clean_range) +feroceon_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -324,7 +324,7 @@ ENTRY(feroceon_dma_clean_range) mov pc, lr .align 5 -ENTRY(feroceon_range_dma_clean_range) +feroceon_range_dma_clean_range: mrs r2, cpsr cmp r1, r0 subne r1, r1, #1 @ top address is inclusive @@ -414,8 +414,6 @@ ENTRY(feroceon_cache_fns) .long feroceon_flush_kern_dcache_area .long feroceon_dma_map_area .long feroceon_dma_unmap_area - .long feroceon_dma_inv_range - .long feroceon_dma_clean_range .long feroceon_dma_flush_range ENTRY(feroceon_range_cache_fns) @@ -427,8 +425,6 @@ ENTRY(feroceon_range_cache_fns) .long feroceon_range_flush_kern_dcache_area .long feroceon_range_dma_map_area .long feroceon_dma_unmap_area - .long feroceon_range_dma_inv_range - .long feroceon_range_dma_clean_range .long feroceon_range_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 55b7fbe..caa3115 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -218,7 +218,7 @@ ENTRY(mohawk_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(mohawk_dma_inv_range) +mohawk_dma_inv_range: tst 
r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 @@ -241,7 +241,7 @@ ENTRY(mohawk_dma_inv_range) * * (same as v4wb) */ -ENTRY(mohawk_dma_clean_range) +mohawk_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -301,8 +301,6 @@ ENTRY(mohawk_cache_fns) .long mohawk_flush_kern_dcache_area .long mohawk_dma_map_area .long mohawk_dma_unmap_area - .long mohawk_dma_inv_range - .long mohawk_dma_clean_range .long mohawk_dma_flush_range ENTRY(cpu_mohawk_dcache_clean_area) diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 4e4ce88..046b3d8 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -257,7 +257,7 @@ ENTRY(xsc3_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(xsc3_dma_inv_range) +xsc3_dma_inv_range: tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line @@ -278,7 +278,7 @@ ENTRY(xsc3_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(xsc3_dma_clean_range) +xsc3_dma_clean_range: bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE @@ -337,8 +337,6 @@ ENTRY(xsc3_cache_fns) .long xsc3_flush_kern_dcache_area .long xsc3_dma_map_area .long xsc3_dma_unmap_area - .long xsc3_dma_inv_range - .long xsc3_dma_clean_range .long xsc3_dma_flush_range ENTRY(cpu_xsc3_dcache_clean_area) diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index a7999f9..63037e2 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -315,7 +315,7 @@ ENTRY(xscale_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(xscale_dma_inv_range) +xscale_dma_inv_range: tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -336,7 +336,7 @@ ENTRY(xscale_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(xscale_dma_clean_range) +xscale_dma_clean_range: bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHELINESIZE @@ -409,8 +409,6 @@ ENTRY(xscale_cache_fns) .long xscale_flush_kern_dcache_area .long xscale_dma_map_area .long xscale_dma_unmap_area - .long xscale_dma_inv_range - .long xscale_dma_clean_range .long xscale_dma_flush_range /* @@ -436,8 +434,6 @@ ENTRY(xscale_80200_A0_A1_cache_fns) .long xscale_dma_a0_map_area .long xscale_dma_unmap_area .long xscale_dma_flush_range - .long xscale_dma_clean_range - .long xscale_dma_flush_range ENTRY(cpu_xscale_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry -- cgit v1.1 From 2ffe2da3e71652d4f4cae19539b5c78c2a239136 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 31 Oct 2009 16:52:16 +0000 Subject: ARM: dma-mapping: fix for speculative prefetching ARMv6 and ARMv7 CPUs can perform speculative prefetching, which makes DMA cache coherency handling slightly more interesting. Rather than being able to rely upon the CPU not accessing the DMA buffer until DMA has completed, we now must expect that the cache could be loaded with possibly stale data from the DMA buffer. Where DMA involves data being transferred to the device, we clean the cache before handing it over for DMA, otherwise we invalidate the buffer to get rid of potential writebacks. 
On DMA Completion, if data was transferred from the device, we invalidate the buffer to get rid of any stale speculative prefetches. Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/mm/cache-v6.S | 10 ++++--- arch/arm/mm/cache-v7.S | 10 ++++--- arch/arm/mm/dma-mapping.c | 68 +++++++++++++++++++++-------------------------- 3 files changed, 42 insertions(+), 46 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index a11934e..9d89c67 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -271,10 +271,9 @@ ENTRY(v6_dma_flush_range) */ ENTRY(v6_dma_map_area) add r1, r1, r0 - cmp r2, #DMA_TO_DEVICE - beq v6_dma_clean_range - bcs v6_dma_inv_range - b v6_dma_flush_range + teq r2, #DMA_FROM_DEVICE + beq v6_dma_inv_range + b v6_dma_clean_range ENDPROC(v6_dma_map_area) /* @@ -284,6 +283,9 @@ ENDPROC(v6_dma_map_area) * - dir - DMA direction */ ENTRY(v6_dma_unmap_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + bne v6_dma_inv_range mov pc, lr ENDPROC(v6_dma_unmap_area) diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index b1cd0fd..bcd64f2 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -279,10 +279,9 @@ ENDPROC(v7_dma_flush_range) */ ENTRY(v7_dma_map_area) add r1, r1, r0 - cmp r2, #DMA_TO_DEVICE - beq v7_dma_clean_range - bcs v7_dma_inv_range - b v7_dma_flush_range + teq r2, #DMA_FROM_DEVICE + beq v7_dma_inv_range + b v7_dma_clean_range ENDPROC(v7_dma_map_area) /* @@ -292,6 +291,9 @@ ENDPROC(v7_dma_map_area) * - dir - DMA direction */ ENTRY(v7_dma_unmap_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + bne v7_dma_inv_range mov pc, lr ENDPROC(v7_dma_unmap_area) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index efa8efa3..64daef2 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -404,34 +404,22 @@ EXPORT_SYMBOL(dma_free_coherent); * platforms with CONFIG_DMABOUNCE. * Use the driver DMA support - see dma-mapping.h (dma_sync_*) */ -static void dma_cache_maint(const void *start, size_t size, int direction) -{ - void (*outer_op)(unsigned long, unsigned long); - - switch (direction) { - case DMA_FROM_DEVICE: /* invalidate only */ - outer_op = outer_inv_range; - break; - case DMA_TO_DEVICE: /* writeback only */ - outer_op = outer_clean_range; - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - outer_op = outer_flush_range; - break; - default: - BUG(); - } - - outer_op(__pa(start), __pa(start) + size); -} - void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, enum dma_data_direction dir) { + unsigned long paddr; + BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); dmac_map_area(kaddr, size, dir); - dma_cache_maint(kaddr, size, dir); + + paddr = __pa(kaddr); + if (dir == DMA_FROM_DEVICE) { + outer_inv_range(paddr, paddr + size); + } else { + outer_clean_range(paddr, paddr + size); + } + /* FIXME: non-speculating: flush on bidirectional mappings? 
*/ } EXPORT_SYMBOL(___dma_single_cpu_to_dev); @@ -440,6 +428,13 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, { BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); + /* FIXME: non-speculating: not required */ + /* don't bother invalidating if DMA to device */ + if (dir != DMA_TO_DEVICE) { + unsigned long paddr = __pa(kaddr); + outer_inv_range(paddr, paddr + size); + } + dmac_unmap_area(kaddr, size, dir); } EXPORT_SYMBOL(___dma_single_dev_to_cpu); @@ -487,32 +482,29 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { unsigned long paddr; - void (*outer_op)(unsigned long, unsigned long); - - switch (direction) { - case DMA_FROM_DEVICE: /* invalidate only */ - outer_op = outer_inv_range; - break; - case DMA_TO_DEVICE: /* writeback only */ - outer_op = outer_clean_range; - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - outer_op = outer_flush_range; - break; - default: - BUG(); - } dma_cache_maint_page(page, off, size, dir, dmac_map_area); paddr = page_to_phys(page) + off; - outer_op(paddr, paddr + size); + if (dir == DMA_FROM_DEVICE) { + outer_inv_range(paddr, paddr + size); + } else { + outer_clean_range(paddr, paddr + size); + } + /* FIXME: non-speculating: flush on bidirectional mappings? */ } EXPORT_SYMBOL(___dma_page_cpu_to_dev); void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { + unsigned long paddr = page_to_phys(page) + off; + + /* FIXME: non-speculating: not required */ + /* don't bother invalidating if DMA to device */ + if (dir != DMA_TO_DEVICE) + outer_inv_range(paddr, paddr + size); + dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); } EXPORT_SYMBOL(___dma_page_dev_to_cpu); -- cgit v1.1 From 31aa8fd6fd30b0f36416df7d09619768d26b4332 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 11:10:03 +0000 Subject: ARM: Add caller information to ioremap This allows the procfs vmallocinfo file to show who created the ioremap regions. Note: __builtin_return_address(0) doesn't do what's expected if its used in an inline function, so we leave __arm_ioremap callers in such places alone. Signed-off-by: Russell King --- arch/arm/mm/ioremap.c | 57 ++++++++++++++++++++++++++++++--------------------- arch/arm/mm/nommu.c | 12 +++++++++++ 2 files changed, 46 insertions(+), 23 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 0ab75c6..28c8b95 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm) * which requires the new ioremap'd region to be referenced, the CPU will * reference the _old_ region. * - * Note that get_vm_area() allocates a guard 4K page, so we need to mask - * the size back to 1MB aligned or we will overflow in the loop below. + * Note that get_vm_area_caller() allocates a guard 4K page, so we need to + * mask the size back to 1MB aligned or we will overflow in the loop below. */ static void unmap_area_sections(unsigned long virt, unsigned long size) { @@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, } #endif - -/* - * Remap an arbitrary physical address space into the kernel virtual - * address space. Needed when the kernel wants to access high addresses - * directly. - * - * NOTE! 
We need to allow non-page-aligned mappings too: we will obviously - * have to convert them into an offset in a page-aligned mapping, but the - * caller shouldn't need to know that small detail. - * - * 'flags' are the extra L_PTE_ flags that you want to specify for this - * mapping. See for more information. - */ -void __iomem * -__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, - unsigned int mtype) +void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, + unsigned long offset, size_t size, unsigned int mtype, void *caller) { const struct mem_type *type; int err; @@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, */ size = PAGE_ALIGN(offset + size); - area = get_vm_area(size, VM_IOREMAP); + area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; addr = (unsigned long)area->addr; @@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, flush_cache_vmap(addr, addr + size); return (void __iomem *) (offset + addr); } -EXPORT_SYMBOL(__arm_ioremap_pfn); -void __iomem * -__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) +void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, + unsigned int mtype, void *caller) { unsigned long last_addr; unsigned long offset = phys_addr & ~PAGE_MASK; @@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) if (!size || last_addr < phys_addr) return NULL; - return __arm_ioremap_pfn(pfn, offset, size, mtype); + return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, + caller); +} + +/* + * Remap an arbitrary physical address space into the kernel virtual + * address space. Needed when the kernel wants to access high addresses + * directly. + * + * NOTE! We need to allow non-page-aligned mappings too: we will obviously + * have to convert them into an offset in a page-aligned mapping, but the + * caller shouldn't need to know that small detail. 
+ */ +void __iomem * +__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, + unsigned int mtype) +{ + return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(__arm_ioremap_pfn); + +void __iomem * +__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) +{ + return __arm_ioremap_caller(phys_addr, size, mtype, + __builtin_return_address(0)); } EXPORT_SYMBOL(__arm_ioremap); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 374a831..9bfeb6b 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, } EXPORT_SYMBOL(__arm_ioremap_pfn); +void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, + size_t size, unsigned int mtype, void *caller) +{ + return __arm_ioremap_pfn(pfn, offset, size, mtype); +} + void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) { @@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, } EXPORT_SYMBOL(__arm_ioremap); +void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, + unsigned int mtype, void *caller) +{ + return __arm_ioremap(phys_addr, size, mtype); +} + void __iounmap(volatile void __iomem *addr) { } -- cgit v1.1 From e119bfff1f102f8d1505910cd6c09df55c776b43 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 10 Jan 2010 17:23:29 +0000 Subject: ARM: Move creation of /proc/cpu out of alignment.c Always creating this directory avoids other users having to jump through silly hoops when they want to share this directory. Signed-off-by: Russell King --- arch/arm/mm/alignment.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index b270d62..0c5eb69 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -898,11 +898,7 @@ static int __init alignment_init(void) #ifdef CONFIG_PROC_FS struct proc_dir_entry *res; - res = proc_mkdir("cpu", NULL); - if (!res) - return -ENOMEM; - - res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res); + res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL); if (!res) return -ENOMEM; -- cgit v1.1 From 2b0d8c251b8876d530a6bf671eb5425838fa698a Mon Sep 17 00:00:00 2001 From: Jeremy Kerr Date: Mon, 11 Jan 2010 23:17:34 +0100 Subject: ARM: 5880/1: arm: use generic infrastructure for early params The ARM setup code includes its own parser for early params, there's also one in the generic init code. This patch removes __early_init (and related code) from arch/arm/kernel/setup.c, and changes users to the generic early_init macro instead. The generic macro takes a char * argument, rather than char **, so we need to update the parser functions a little. 
Signed-off-by: Jeremy Kerr Signed-off-by: Russell King --- arch/arm/mm/init.c | 12 +++++++----- arch/arm/mm/mmu.c | 41 +++++++++++++++++++++-------------------- 2 files changed, 28 insertions(+), 25 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index a04ffbb..a340569 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -32,19 +32,21 @@ static unsigned long phys_initrd_start __initdata = 0; static unsigned long phys_initrd_size __initdata = 0; -static void __init early_initrd(char **p) +static int __init early_initrd(char *p) { unsigned long start, size; + char *endp; - start = memparse(*p, p); - if (**p == ',') { - size = memparse((*p) + 1, p); + start = memparse(p, &endp); + if (*endp == ',') { + size = memparse(endp + 1, NULL); phys_initrd_start = start; phys_initrd_size = size; } + return 0; } -__early_param("initrd=", early_initrd); +early_param("initrd", early_initrd); static int __init parse_tag_initrd(const struct tag *tag) { diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 1708da8..88f5d71 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = { * writebuffer to be turned off. (Note: the write * buffer should not be on and the cache off). */ -static void __init early_cachepolicy(char **p) +static int __init early_cachepolicy(char *p) { int i; for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { int len = strlen(cache_policies[i].policy); - if (memcmp(*p, cache_policies[i].policy, len) == 0) { + if (memcmp(p, cache_policies[i].policy, len) == 0) { cachepolicy = i; cr_alignment &= ~cache_policies[i].cr_mask; cr_no_alignment &= ~cache_policies[i].cr_mask; - *p += len; break; } } @@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p) } flush_cache_all(); set_cr(cr_alignment); + return 0; } -__early_param("cachepolicy=", early_cachepolicy); +early_param("cachepolicy", early_cachepolicy); -static void __init early_nocache(char **__unused) +static int __init early_nocache(char *__unused) { char *p = "buffered"; printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); - early_cachepolicy(&p); + early_cachepolicy(p); + return 0; } -__early_param("nocache", early_nocache); +early_param("nocache", early_nocache); -static void __init early_nowrite(char **__unused) +static int __init early_nowrite(char *__unused) { char *p = "uncached"; printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); - early_cachepolicy(&p); + early_cachepolicy(p); + return 0; } -__early_param("nowb", early_nowrite); +early_param("nowb", early_nowrite); -static void __init early_ecc(char **p) +static int __init early_ecc(char *p) { - if (memcmp(*p, "on", 2) == 0) { + if (memcmp(p, "on", 2) == 0) ecc_mask = PMD_PROTECTION; - *p += 2; - } else if (memcmp(*p, "off", 3) == 0) { + else if (memcmp(p, "off", 3) == 0) ecc_mask = 0; - *p += 3; - } + return 0; } -__early_param("ecc=", early_ecc); +early_param("ecc", early_ecc); static int __init noalign_setup(char *__unused) { @@ -670,9 +670,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M; * bytes. This can be used to increase (or decrease) the vmalloc * area - the default is 128m. 
*/ -static void __init early_vmalloc(char **arg) +static int __init early_vmalloc(char *arg) { - vmalloc_reserve = memparse(*arg, arg); + vmalloc_reserve = memparse(arg, NULL); if (vmalloc_reserve < SZ_16M) { vmalloc_reserve = SZ_16M; @@ -687,8 +687,9 @@ static void __init early_vmalloc(char **arg) "vmalloc area is too big, limiting to %luMB\n", vmalloc_reserve >> 20); } + return 0; } -__early_param("vmalloc=", early_vmalloc); +early_param("vmalloc", early_vmalloc); #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) -- cgit v1.1 From 11805bcfa411c816b7c76fc40724be6733c74ffc Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 26 Jan 2010 19:09:42 +0100 Subject: ARM: 5905/1: ARM: Global ASID allocation on SMP The current ASID allocation algorithm doesn't ensure the notification of the other CPUs when the ASID rolls over. This may lead to two processes using the same ASID (but different generation) or multiple threads of the same process using different ASIDs. This patch adds the broadcasting of the ASID rollover event to the other CPUs. To avoid a race on multiple CPUs modifying "cpu_last_asid" during the handling of the broadcast, the ASID numbering now starts at "smp_processor_id() + 1". At rollover, the cpu_last_asid will be set to NR_CPUS. Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/mm/context.c | 124 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 110 insertions(+), 14 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a9e22e3..b0ee9ba 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -10,12 +10,17 @@ #include #include #include +#include +#include #include #include static DEFINE_SPINLOCK(cpu_asid_lock); unsigned int cpu_last_asid = ASID_FIRST_VERSION; +#ifdef CONFIG_SMP +DEFINE_PER_CPU(struct mm_struct *, current_mm); +#endif /* * We fork()ed a process, and we need a new context for the child @@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION; void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) { mm->context.id = 0; + spin_lock_init(&mm->context.id_lock); } +static void flush_context(void) +{ + /* set the reserved ASID before flushing the TLB */ + asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); + isb(); + local_flush_tlb_all(); + if (icache_is_vivt_asid_tagged()) { + __flush_icache_all(); + dsb(); + } +} + +#ifdef CONFIG_SMP + +static void set_mm_context(struct mm_struct *mm, unsigned int asid) +{ + unsigned long flags; + + /* + * Locking needed for multi-threaded applications where the + * same mm->context.id could be set from different CPUs during + * the broadcast. This function is also called via IPI so the + * mm->context.id_lock has to be IRQ-safe. + */ + spin_lock_irqsave(&mm->context.id_lock, flags); + if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { + /* + * Old version of ASID found. Set the new one and + * reset mm_cpumask(mm). + */ + mm->context.id = asid; + cpumask_clear(mm_cpumask(mm)); + } + spin_unlock_irqrestore(&mm->context.id_lock, flags); + + /* + * Set the mm_cpumask(mm) bit for the current CPU. + */ + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); +} + +/* + * Reset the ASID on the current CPU. This function call is broadcast + * from the CPU handling the ASID rollover and holding cpu_asid_lock. 
+ */ +static void reset_context(void *info) +{ + unsigned int asid; + unsigned int cpu = smp_processor_id(); + struct mm_struct *mm = per_cpu(current_mm, cpu); + + /* + * Check if a current_mm was set on this CPU as it might still + * be in the early booting stages and using the reserved ASID. + */ + if (!mm) + return; + + smp_rmb(); + asid = cpu_last_asid + cpu + 1; + + flush_context(); + set_mm_context(mm, asid); + + /* set the new ASID */ + asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id)); + isb(); +} + +#else + +static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) +{ + mm->context.id = asid; + cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); +} + +#endif + void __new_context(struct mm_struct *mm) { unsigned int asid; spin_lock(&cpu_asid_lock); +#ifdef CONFIG_SMP + /* + * Check the ASID again, in case the change was broadcast from + * another CPU before we acquired the lock. + */ + if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); + spin_unlock(&cpu_asid_lock); + return; + } +#endif + /* + * At this point, it is guaranteed that the current mm (with + * an old ASID) isn't active on any other CPU since the ASIDs + * are changed simultaneously via IPI. + */ asid = ++cpu_last_asid; if (asid == 0) asid = cpu_last_asid = ASID_FIRST_VERSION; @@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm) * to start a new version and flush the TLB. */ if (unlikely((asid & ~ASID_MASK) == 0)) { - asid = ++cpu_last_asid; - /* set the reserved ASID before flushing the TLB */ - asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n" - : - : "r" (0)); - isb(); - flush_tlb_all(); - if (icache_is_vivt_asid_tagged()) { - __flush_icache_all(); - dsb(); - } + asid = cpu_last_asid + smp_processor_id() + 1; + flush_context(); +#ifdef CONFIG_SMP + smp_wmb(); + smp_call_function(reset_context, NULL, 1); +#endif + cpu_last_asid += NR_CPUS; } - spin_unlock(&cpu_asid_lock); - cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); - mm->context.id = asid; + set_mm_context(mm, asid); + spin_unlock(&cpu_asid_lock); } -- cgit v1.1 From 1a28e3d977860dc760909083df625b300f695680 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 1 Feb 2010 23:30:26 +0100 Subject: ARM: 5911/1: ARM: Select CPU_32v6K for CPU_V7 only if ARCH_OMAP2 is not selected Otherwise the kernel built with both CPU_V6 and CPU_V7 will not boot on omap2. Signed-off-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index baf6384..4c2e90d 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -399,7 +399,7 @@ config CPU_V6 config CPU_32v6K bool "Support ARM V6K processor extensions" if !SMP depends on CPU_V6 - default y if SMP && !ARCH_MX3 + default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) help Say Y here if your ARMv6 processor supports the 'K' extension. 
This enables the kernel to use some instructions not present @@ -410,7 +410,7 @@ config CPU_32v6K # ARMv7 config CPU_V7 bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX - select CPU_32v6K + select CPU_32v6K if !ARCH_OMAP2 select CPU_32v7 select CPU_ABRT_EV7 select CPU_PABRT_V7 -- cgit v1.1 From 424d6b145f863d012c540082d0c1afb5bb4dea48 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 4 Feb 2010 19:35:06 +0100 Subject: ARM: 5916/1: ARM: L2 : Add maintainace by line helper functions This patch adds the cache maintainance by line helper functions. Signed-off-by: Santosh Shilimkar Acked-by: Catalin Marinas Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index cb8fc65..1a14d18 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -42,6 +42,27 @@ static inline void cache_sync(void) cache_wait(base + L2X0_CACHE_SYNC, 1); } +static inline void l2x0_clean_line(unsigned long addr) +{ + void __iomem *base = l2x0_base; + cache_wait(base + L2X0_CLEAN_LINE_PA, 1); + writel(addr, base + L2X0_CLEAN_LINE_PA); +} + +static inline void l2x0_inv_line(unsigned long addr) +{ + void __iomem *base = l2x0_base; + cache_wait(base + L2X0_INV_LINE_PA, 1); + writel(addr, base + L2X0_INV_LINE_PA); +} + +static inline void l2x0_flush_line(unsigned long addr) +{ + void __iomem *base = l2x0_base; + cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); + writel(addr, base + L2X0_CLEAN_INV_LINE_PA); +} + static inline void l2x0_inv_all(void) { unsigned long flags; @@ -62,23 +83,20 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) spin_lock_irqsave(&l2x0_lock, flags); if (start & (CACHE_LINE_SIZE - 1)) { start &= ~(CACHE_LINE_SIZE - 1); - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); - writel(start, base + L2X0_CLEAN_INV_LINE_PA); + l2x0_flush_line(start); start += CACHE_LINE_SIZE; } if (end & (CACHE_LINE_SIZE - 1)) { end &= ~(CACHE_LINE_SIZE - 1); - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); - writel(end, base + L2X0_CLEAN_INV_LINE_PA); + l2x0_flush_line(end); } while (start < end) { unsigned long blk_end = start + min(end - start, 4096UL); while (start < blk_end) { - cache_wait(base + L2X0_INV_LINE_PA, 1); - writel(start, base + L2X0_INV_LINE_PA); + l2x0_inv_line(start); start += CACHE_LINE_SIZE; } @@ -103,8 +121,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end) unsigned long blk_end = start + min(end - start, 4096UL); while (start < blk_end) { - cache_wait(base + L2X0_CLEAN_LINE_PA, 1); - writel(start, base + L2X0_CLEAN_LINE_PA); + l2x0_clean_line(start); start += CACHE_LINE_SIZE; } @@ -129,8 +146,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) unsigned long blk_end = start + min(end - start, 4096UL); while (start < blk_end) { - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); - writel(start, base + L2X0_CLEAN_INV_LINE_PA); + l2x0_flush_line(start); start += CACHE_LINE_SIZE; } -- cgit v1.1 From d309427e792ea750cdd312e7a92cf6047ae44962 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 4 Feb 2010 19:37:09 +0100 Subject: ARM: 5917/1: OMAP4: Add L2 Cache support This patch adds L2 Cache support for OMAP4. 
External L2 cache is used in OMAP4 CC: Catalin Marinas Signed-off-by: Santosh Shilimkar Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 4c2e90d..e859743 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -754,7 +754,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH config CACHE_L2X0 bool "Enable the L2x0 outer cache controller" depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ - REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK + REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4 default y select OUTER_CACHE help -- cgit v1.1 From 9e65582a8e8715f883a34eea66e0643778ce878d Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 4 Feb 2010 19:42:42 +0100 Subject: ARM: 5919/1: ARM: L2 : Errata 588369: Clean & Invalidate do not invalidate clean lines This patch implements the work-around for the errata 588369.The secure API is used to alter L2 debug register because of trust-zone. This version updated with comments from Russell and Catalin and generated against 2.6.33-rc6 mainline kernel. Detail comments can be found: http://www.spinics.net/lists/linux-omap/msg23431.html Signed-off-by: Woodruff Richard Signed-off-by: Santosh Shilimkar Acked-by: Catalin Marinas Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 1a14d18..0733463 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -56,12 +56,42 @@ static inline void l2x0_inv_line(unsigned long addr) writel(addr, base + L2X0_INV_LINE_PA); } +#ifdef CONFIG_PL310_ERRATA_588369 +static void debug_writel(unsigned long val) +{ + extern void omap_smc1(u32 fn, u32 arg); + + /* + * Texas Instrument secure monitor api to modify the + * PL310 Debug Control Register. 
+ */ + omap_smc1(0x100, val); +} + +static inline void l2x0_flush_line(unsigned long addr) +{ + void __iomem *base = l2x0_base; + + /* Clean by PA followed by Invalidate by PA */ + cache_wait(base + L2X0_CLEAN_LINE_PA, 1); + writel(addr, base + L2X0_CLEAN_LINE_PA); + cache_wait(base + L2X0_INV_LINE_PA, 1); + writel(addr, base + L2X0_INV_LINE_PA); +} +#else + +/* Optimised out for non-errata case */ +static inline void debug_writel(unsigned long val) +{ +} + static inline void l2x0_flush_line(unsigned long addr) { void __iomem *base = l2x0_base; cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); writel(addr, base + L2X0_CLEAN_INV_LINE_PA); } +#endif static inline void l2x0_inv_all(void) { @@ -83,13 +113,17 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) spin_lock_irqsave(&l2x0_lock, flags); if (start & (CACHE_LINE_SIZE - 1)) { start &= ~(CACHE_LINE_SIZE - 1); + debug_writel(0x03); l2x0_flush_line(start); + debug_writel(0x00); start += CACHE_LINE_SIZE; } if (end & (CACHE_LINE_SIZE - 1)) { end &= ~(CACHE_LINE_SIZE - 1); + debug_writel(0x03); l2x0_flush_line(end); + debug_writel(0x00); } while (start < end) { @@ -145,10 +179,12 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) while (start < end) { unsigned long blk_end = start + min(end - start, 4096UL); + debug_writel(0x03); while (start < blk_end) { l2x0_flush_line(start); start += CACHE_LINE_SIZE; } + debug_writel(0x00); if (blk_end < end) { spin_unlock_irqrestore(&l2x0_lock, flags); -- cgit v1.1 From db9ef1af4879c121c354ad2f653f185f1d50fd89 Mon Sep 17 00:00:00 2001 From: Fenkart/Bostandzhyan Date: Sun, 7 Feb 2010 21:45:47 +0100 Subject: ARM: 5926/1: Add "Virtual kernel memory..." printout. Code based on parisc and x86_32. Tested-by: H Hartley Sweeten Signed-off-by: Andreas Fenkart Signed-off-by: Russell King --- arch/arm/mm/init.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 69 insertions(+), 9 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index a340569..e8e3a74 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -562,7 +563,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) */ void __init mem_init(void) { - unsigned int codesize, datasize, initsize; + unsigned long reserved_pages, free_pages; int i, node; #ifndef CONFIG_DISCONTIGMEM @@ -598,6 +599,33 @@ void __init mem_init(void) totalram_pages += totalhigh_pages; #endif + reserved_pages = free_pages = 0; + + for_each_online_node(node) { + pg_data_t *n = NODE_DATA(node); + struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; + + for_each_nodebank(i, &meminfo, node) { + struct membank *bank = &meminfo.bank[i]; + unsigned int pfn1, pfn2; + struct page *page, *end; + + pfn1 = bank_pfn_start(bank); + pfn2 = bank_pfn_end(bank); + + page = map + pfn1; + end = map + pfn2; + + do { + if (PageReserved(page)) + reserved_pages++; + else if (!page_count(page)) + free_pages++; + page++; + } while (page < end); + } + } + /* * Since our memory may not be contiguous, calculate the * real number of pages we have in this system @@ -610,16 +638,48 @@ void __init mem_init(void) } printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); - codesize = _etext - _text; - datasize = _end - _data; - initsize = __init_end - __init_begin; - - printk(KERN_NOTICE "Memory: %luKB available (%dK code, " - "%dK data, %dK init, %luK highmem)\n", - nr_free_pages() << (PAGE_SHIFT-10), 
codesize >> 10, - datasize >> 10, initsize >> 10, + printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", + nr_free_pages() << (PAGE_SHIFT-10), + free_pages << (PAGE_SHIFT-10), + reserved_pages << (PAGE_SHIFT-10), totalhigh_pages << (PAGE_SHIFT-10)); +#define MLK(b, t) b, t, ((t) - (b)) >> 10 +#define MLM(b, t) b, t, ((t) - (b)) >> 20 +#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) + + printk(KERN_NOTICE "Virtual kernel memory layout:\n" + " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" + " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" + " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" + " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" +#ifdef CONFIG_HIGHMEM + " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" +#endif + " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" + " .init : 0x%p" " - 0x%p" " (%4d kB)\n" + " .text : 0x%p" " - 0x%p" " (%4d kB)\n" + " .data : 0x%p" " - 0x%p" " (%4d kB)\n", + + MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + + (PAGE_SIZE)), + MLK(FIXADDR_START, FIXADDR_TOP), + MLM(VMALLOC_START, (unsigned long)VMALLOC_END), + MLM(PAGE_OFFSET, (unsigned long)high_memory), +#ifdef CONFIG_HIGHMEM + MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * + (PAGE_SIZE)), +#endif + MLM(MODULES_VADDR, MODULES_END), + + MLK_ROUNDUP(__init_begin, __init_end), + MLK_ROUNDUP(_text, _etext), + MLK_ROUNDUP(_data, _edata)); + +#undef MLK +#undef MLM +#undef MLK_ROUNDUP + if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; /* -- cgit v1.1 From a7bd08c82e4f74387a39eeebb942712f23967420 Mon Sep 17 00:00:00 2001 From: Fenkart/Bostandzhyan Date: Sun, 7 Feb 2010 21:46:33 +0100 Subject: ARM: 5927/1: Make delimiters of DMA area globally visibly. Adds DMA area to 'virtual memory map' startup message Tested-by: H Hartley Sweeten Signed-off-by: Andreas Fenkart Signed-off-by: Russell King --- arch/arm/mm/dma-mapping.c | 3 --- arch/arm/mm/init.c | 6 ++++++ 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 26325cb..48eedab 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -29,9 +29,6 @@ #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" #endif -#define CONSISTENT_END (0xffe00000) -#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) - #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index e8e3a74..bda481e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -651,6 +651,9 @@ void __init mem_init(void) printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" +#ifdef CONFIG_MMU + " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" +#endif " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" #ifdef CONFIG_HIGHMEM @@ -664,6 +667,9 @@ void __init mem_init(void) MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), MLK(FIXADDR_START, FIXADDR_TOP), +#ifdef CONFIG_MMU + MLM(CONSISTENT_BASE, CONSISTENT_END), +#endif MLM(VMALLOC_START, (unsigned long)VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), #ifdef CONFIG_HIGHMEM -- cgit v1.1 From c931b4f655a1b86c929384e674eb8c31795f3bd7 Mon Sep 17 00:00:00 2001 From: Fenkart/Bostandzhyan Date: Sun, 7 Feb 2010 21:47:17 +0100 Subject: 
ARM: 5928/1: Change type of VMALLOC_END to unsigned long. Makes it consistent with VMALLOC_START Tested-by: H Hartley Sweeten Signed-off-by: Andreas Fenkart Signed-off-by: Russell King --- arch/arm/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index bda481e..3a20772 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -670,7 +670,7 @@ void __init mem_init(void) #ifdef CONFIG_MMU MLM(CONSISTENT_BASE, CONSISTENT_END), #endif - MLM(VMALLOC_START, (unsigned long)VMALLOC_END), + MLM(VMALLOC_START, VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), #ifdef CONFIG_HIGHMEM MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * -- cgit v1.1 From a183927213df225bd93d21857b6aaafbb95e590d Mon Sep 17 00:00:00 2001 From: Fenkart/Bostandzhyan Date: Sun, 7 Feb 2010 21:47:58 +0100 Subject: ARM: 5929/1: Add checks to detect overlap of memory regions. Tested-by: H Hartley Sweeten Signed-off-by: Andreas Fenkart Signed-off-by: Russell King --- arch/arm/mm/init.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 3a20772..7829cb5 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -686,6 +686,23 @@ void __init mem_init(void) #undef MLM #undef MLK_ROUNDUP + /* + * Check boundaries twice: Some fundamental inconsistencies can + * be detected at build time already. + */ +#ifdef CONFIG_MMU + BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE); + BUG_ON(VMALLOC_END > CONSISTENT_BASE); + + BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); + BUG_ON(TASK_SIZE > MODULES_VADDR); +#endif + +#ifdef CONFIG_HIGHMEM + BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); + BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); +#endif + if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; /* -- cgit v1.1 From 4b3073e1c53a256275f1079c0fbfbe85883d9275 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:40:18 +0000 Subject: MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself On VIVT ARM, when we have multiple shared mappings of the same file in the same MM, we need to ensure that we have coherency across all copies. We do this via make_coherent() by making the pages uncacheable. This used to work fine, until we allowed highmem with highpte - we now have a page table which is mapped as required, and is not available for modification via update_mmu_cache(). Ralf Beache suggested getting rid of the PTE value passed to update_mmu_cache(): On MIPS update_mmu_cache() calls __update_tlb() which walks pagetables to construct a pointer to the pte again. Passing a pte_t * is much more elegant. Maybe we might even replace the pte argument with the pte_t? Ben Herrenschmidt would also like the pte pointer for PowerPC: Passing the ptep in there is exactly what I want. I want that -instead- of the PTE value, because I have issue on some ppc cases, for I$/D$ coherency, where set_pte_at() may decide to mask out the _PAGE_EXEC. So, pass in the mapped page table pointer into update_mmu_cache(), and remove the PTE value, updating all implementations and call sites to suit. 
Includes a fix from Stephen Rothwell: sparc: fix fallout from update_mmu_cache API change Signed-off-by: Stephen Rothwell Acked-by: Benjamin Herrenschmidt Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index ae88f2c..c45f9bb 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -149,9 +149,10 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne * * Note that the pte lock will be held. */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) { - unsigned long pfn = pte_pfn(pte); + unsigned long pfn = pte_pfn(*ptep); struct address_space *mapping; struct page *page; -- cgit v1.1 From ae1402022edbeef3991f1e4bae8fa99558be291b Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:43:57 +0000 Subject: ARM: make_coherent(): fix problems with highpte, part 2 update_mmu_cache() is called with the page table for the faulted-in page still mapped. We need to modify the PTE for this page to ensure coherency with other shared mappings when multiple shared mappings exist within a MM. Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index c45f9bb..c9b97e9 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -99,7 +99,8 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, } static void -make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) +make_coherent(struct address_space *mapping, struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, unsigned long pfn) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *mpnt; @@ -131,7 +132,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne } flush_dcache_mmap_unlock(mapping); if (aliases) - adjust_pte(vma, addr, pfn); + do_adjust_pte(vma, addr, pfn, ptep); else flush_cache_page(vma, addr, pfn); } @@ -174,7 +175,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, #endif if (mapping) { if (cache_is_vivt()) - make_coherent(mapping, vma, addr, pfn); + make_coherent(mapping, vma, addr, ptep, pfn); else if (vma->vm_flags & VM_EXEC) __flush_icache_all(); } -- cgit v1.1 From d6d502fa4be1acd01971476fc732c95a4da16d90 Mon Sep 17 00:00:00 2001 From: Kukjin Kim Date: Mon, 22 Feb 2010 00:02:59 +0100 Subject: ARM: 5952/1: ARM: MM: Add ARM_L1_CACHE_SHIFT_6 for handle inside each ARCH Kconfig Add ARM_L1_CACHE_SHIFT_6 to arch/arm/Kconfig to allow CPUs with L1 cache lines which are 64bytes to indicate this without having to alter the arch/arm/mm/Kconfig entry each time. Update the mm Kconfig so that ARM_L1_CACHE_SHIFT default value uses this and change OMAP3 and S5PC1XX to select ARM_L1_CACHE_SHIFT_6. 
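For context, and as an illustration rather than part of this patch: the value produced by this Kconfig logic feeds directly into the generic L1 cache-line constants, so a machine class with 64-byte lines only needs to select the new symbol in arch/arm/Kconfig instead of editing the default in arch/arm/mm/Kconfig each time. Roughly, from arch/arm/include/asm/cache.h of this era:

#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)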
Acked-by: Ben Dooks Acked-by: Tony Lindgren Signed-off-by: Kukjin Kim Signed-off-by: Russell King --- arch/arm/mm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index e859743..c4ed9f9 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -779,5 +779,5 @@ config CACHE_XSC3L2 config ARM_L1_CACHE_SHIFT int - default 6 if ARCH_OMAP3 || ARCH_S5PC1XX + default 6 if ARM_L1_CACHE_SHIFT_6 default 5 -- cgit v1.1
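A closing, purely hypothetical illustration of why getting the line size right matters: any structure padded with the generic cacheline helpers inherits L1_CACHE_BYTES, so a CPU with 64-byte lines built with the 32-byte default can end up with two otherwise independent objects sharing a cache line (for non-coherent DMA descriptors that can mean corruption, not just false sharing).

#include <linux/types.h>
#include <linux/cache.h>

/* Hypothetical driver descriptor -- not from any patch in this series. */
struct ring_descriptor {
	u32	status;
	u32	buf_addr;
} ____cacheline_aligned;	/* aligned to SMP_CACHE_BYTES == L1_CACHE_BYTES */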