From a6325247f50628c7e53a483807d0ef2c24a7aa90 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Tue, 6 Oct 2009 21:22:21 +0000
Subject: sh: Sprinkle __uses_jump_to_uncached

Fix some callers of jump_to_uncached() and back_to_cached() that were
not annotated with __uses_jump_to_uncached.

Signed-off-by: Matt Fleming
Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh4.c    | 2 +-
 arch/sh/mm/cache-sh7705.c | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b2453bb..a98c7d8 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -43,7 +43,7 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-static void sh4_flush_icache_range(void *args)
+static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
 {
 	struct flusher_data *data = args;
 	unsigned long start, end;
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 2cadee2..2601935 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -78,7 +78,7 @@ static void sh7705_flush_icache_range(void *args)
 /*
  * Writeback&Invalidate the D-cache of the page
  */
-static void __flush_dcache_page(unsigned long phys)
+static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
 	unsigned long flags;
@@ -144,7 +144,7 @@ static void sh7705_flush_dcache_page(void *arg)
 	__flush_dcache_page(PHYSADDR(page_address(page)));
 }

-static void sh7705_flush_cache_all(void *args)
+static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args)
 {
 	unsigned long flags;
--
cgit v1.1
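As a reading aid, the calling pattern this annotation marks looks roughly
like the following. This is a sketch only -- example_cache_op() and its
argument are invented, not code from these files; only the
jump_to_uncached()/back_to_cached() bracketing matches what the annotated
functions do:

    /* Illustrative caller; example_cache_op() and addr are made up. */
    static void __uses_jump_to_uncached example_cache_op(unsigned long addr)
    {
            unsigned long flags;

            local_irq_save(flags);
            jump_to_uncached();     /* run from the uncached P2 mapping */

            __raw_writel(0, addr);  /* e.g. clear a cache-array valid bit */

            back_to_cached();       /* resume through the cached mapping */
            local_irq_restore(flags);
    }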
From fc2bdefdde89b54d8fcde7bbf7d0adc0ce5cb044 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Tue, 6 Oct 2009 21:22:22 +0000
Subject: sh: Plug PMB alloc memory leak

If we fail to allocate a PMB entry in pmb_remap() we must remember to
clear and free any PMB entries that we may have previously allocated,
e.g. if we were allocating a multiple entry mapping.

Signed-off-by: Matt Fleming
Signed-off-by: Paul Mundt
---
 arch/sh/mm/pmb.c | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b1a714a..58f9358 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -33,6 +33,8 @@

 #define NR_PMB_ENTRIES	16

+static void __pmb_unmap(struct pmb_entry *);
+
 static struct kmem_cache *pmb_cache;
 static unsigned long pmb_map;

@@ -218,9 +220,10 @@ static struct {
 long pmb_remap(unsigned long vaddr, unsigned long phys,
 	       unsigned long size, unsigned long flags)
 {
-	struct pmb_entry *pmbp;
+	struct pmb_entry *pmbp, *pmbe;
 	unsigned long wanted;
 	int pmb_flags, i;
+	long err;

 	/* Convert typical pgprot value to the PMB equivalent */
 	if (flags & _PAGE_CACHABLE) {
@@ -236,20 +239,22 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,

 again:
 	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
-		struct pmb_entry *pmbe;
 		int ret;

 		if (size < pmb_sizes[i].size)
 			continue;

 		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
-		if (IS_ERR(pmbe))
-			return PTR_ERR(pmbe);
+		if (IS_ERR(pmbe)) {
+			err = PTR_ERR(pmbe);
+			goto out;
+		}

 		ret = set_pmb_entry(pmbe);
 		if (ret != 0) {
 			pmb_free(pmbe);
-			return -EBUSY;
+			err = -EBUSY;
+			goto out;
 		}

 		phys += pmb_sizes[i].size;
@@ -270,6 +275,12 @@ again:
 		goto again;

 	return wanted - size;
+
+out:
+	if (pmbp)
+		__pmb_unmap(pmbp);
+
+	return err;
 }

 void pmb_unmap(unsigned long addr)
@@ -283,12 +294,19 @@ void pmb_unmap(unsigned long addr)
 	if (unlikely(!pmbe))
 		return;

+	__pmb_unmap(pmbe);
+}
+
+static void __pmb_unmap(struct pmb_entry *pmbe)
+{
 	WARN_ON(!test_bit(pmbe->entry, &pmb_map));

 	do {
 		struct pmb_entry *pmblink = pmbe;

-		clear_pmb_entry(pmbe);
+		if (pmbe->entry != PMB_NO_ENTRY)
+			clear_pmb_entry(pmbe);
+
 		pmbe = pmblink->link;

 		pmb_free(pmblink);
--
cgit v1.1
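The shape of the fix is the usual unwind-on-partial-failure pattern. Below
is a standalone userspace sketch with invented names (remap(),
unmap_chain()) that mirrors what the new out: label does by handing the
chain of entries built so far to __pmb_unmap():

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
            struct entry *link;
    };

    /* Tear down a whole chain of entries, mirroring __pmb_unmap(). */
    static void unmap_chain(struct entry *e)
    {
            while (e) {
                    struct entry *next = e->link;
                    free(e);
                    e = next;
            }
    }

    /* Build one entry per chunk; 'fail_at' forces the Nth allocation
     * to fail so the error path can be exercised. */
    static int remap(int chunks, int fail_at)
    {
            struct entry *head = NULL;
            int n;

            for (n = 1; n <= chunks; n++) {
                    struct entry *e = (n == fail_at) ? NULL : malloc(sizeof(*e));
                    if (!e) {
                            unmap_chain(head);  /* the fix: free partial work */
                            return -1;
                    }
                    e->link = head;
                    head = e;
            }
            unmap_chain(head);      /* normal teardown, for the demo only */
            return 0;
    }

    int main(void)
    {
            printf("all chunks mapped: %d\n", remap(4, 0)); /* 0 */
            printf("failure mid-way:   %d\n", remap(4, 3)); /* -1, nothing leaked */
            return 0;
    }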
From 2bea7ea7d57fd0022f4cd08ed3d4eb2d39a2920d Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Tue, 6 Oct 2009 21:22:27 +0000
Subject: sh: Try PMB mapping based on physical address, not mapping size

We should favour PMB mappings when the physical address cannot be
reached with 29 bits.

Signed-off-by: Matt Fleming
Signed-off-by: Paul Mundt
---
 arch/sh/mm/ioremap_32.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index c325061..a86eaa9 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -83,7 +83,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	 *
 	 * PMB entries are all pre-faulted.
 	 */
-	if (unlikely(size >= 0x1000000)) {
+	if (unlikely(phys_addr >= P1SEG)) {
 		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

 		if (likely(mapped)) {
--
cgit v1.1

From a2767cfb1d9d97c3f861743f1ad595a80b75ec99 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Tue, 6 Oct 2009 21:22:34 +0000
Subject: sh: Don't allocate smaller sized mappings on every iteration

Currently, we've got the less than ideal situation where if we need to
allocate a 256MB mapping we'll allocate four entries like so,

	entry 1: 128MB
	entry 2:  64MB
	entry 3:  16MB
	entry 4:  16MB

This is because as we execute the loop in pmb_remap() we will
progressively try mapping the remaining address space with smaller and
smaller sizes. This isn't good because the size we use on one iteration
may be the perfect size to use on the next iteration, for instance when
the initial size is divisible by one of the PMB mapping sizes.

With this patch, we now only need two entries in the PMB to map 256MB
of address space,

	entry 1: 128MB
	entry 2: 128MB

Signed-off-by: Matt Fleming
Signed-off-by: Paul Mundt
---
 arch/sh/mm/pmb.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 58f9358..aade3110 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -269,6 +269,13 @@ again:
 		pmbp->link = pmbe;

 		pmbp = pmbe;
+
+		/*
+		 * Instead of trying smaller sizes on every iteration
+		 * (even if we succeed in allocating space), try using
+		 * pmb_sizes[i].size again.
+		 */
+		i--;
 	}

 	if (size >= 0x1000000)
--
cgit v1.1

From 964f7e5a56814b32c727821de77d22bd7ef782bc Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 13 Oct 2009 11:18:34 +0900
Subject: sh: force dcache flush if dcache_dirty bit set.

This too follows the ARM change, given that the issue at hand applies
to all platforms that implement lazy D-cache writeback.

This fixes up the case when a page mapping disappears between the
flush_dcache_page() call (when PG_dcache_dirty is set for the page) and
the update_mmu_cache() call -- such as in the case of swap cache being
freed early. This kills off the mapping test in update_mmu_cache() and
switches to simply testing for PG_dcache_dirty.

Reported-by: Nitin Gupta
Reported-by: Hugh Dickins
Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 35c37b7..5e1091b 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -128,7 +128,7 @@ void __update_cache(struct vm_area_struct *vma,
 		return;

 	page = pfn_to_page(pfn);
-	if (pfn_valid(pfn) && page_mapping(page)) {
+	if (pfn_valid(pfn)) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
 		if (dirty) {
 			unsigned long addr = (unsigned long)page_address(page);
--
cgit v1.1
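To make the race in the patch above concrete, here is a standalone sketch
of lazy D-cache writeback. The helpers are invented -- update_old() and
update_new() stand in for the two versions of update_mmu_cache() -- and it
shows how the pre-fix mapping test skips the owed flush once the mapping
has already gone away:

    #include <stdbool.h>
    #include <stdio.h>

    struct page { bool dcache_dirty; bool has_mapping; };

    /* Lazy writeback: flush_dcache_page() just records that a flush is owed. */
    static void flush_dcache_page(struct page *p)
    {
            p->dcache_dirty = true;
    }

    /* Pre-fix: the flush was gated on the page still having a mapping,
     * so a swap-cache page freed early was never written back. */
    static bool update_old(struct page *p)
    {
            if (p->has_mapping && p->dcache_dirty) {
                    p->dcache_dirty = false;
                    return true;    /* writeback performed */
            }
            return false;
    }

    /* Post-fix: PG_dcache_dirty alone decides. */
    static bool update_new(struct page *p)
    {
            if (p->dcache_dirty) {
                    p->dcache_dirty = false;
                    return true;
            }
            return false;
    }

    int main(void)
    {
            struct page p = { false, true };

            flush_dcache_page(&p);
            p.has_mapping = false;  /* mapping vanishes before update_mmu_cache() */
            printf("old: flushed=%d (stale cache!)\n", update_old(&p));

            p.dcache_dirty = true;
            printf("new: flushed=%d\n", update_new(&p));
            return 0;
    }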
From a7a7c0e1d12bcfb4a96cae439951232b08c91841 Mon Sep 17 00:00:00 2001
From: Valentin Sitdikov
Date: Fri, 16 Oct 2009 14:15:38 +0900
Subject: sh: Fix up single page flushing to use PAGE_SIZE.

Presently the SH-4 cache flushing code uses flush_cache_4096() for most
of the real flushing work, which breaks down to a fixed 4096 unroll and
increment. Not only is this sub-optimal for larger page sizes, it's
also uncovered a bug in sh4_flush_dcache_page() when large page sizes
are used and we have no cache aliases -- resulting in only a part of
the page's D-cache lines being written back.

Signed-off-by: Valentin Sitdikov
Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh4.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index a98c7d8..519e2d1 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -26,7 +26,7 @@
 #define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32

-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);

 /*
@@ -89,8 +89,7 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
 	local_irq_restore(flags);
 }

-static inline void flush_cache_4096(unsigned long start,
-				    unsigned long phys)
+static inline void flush_cache_one(unsigned long start, unsigned long phys)
 {
 	unsigned long flags, exec_offset = 0;

@@ -103,8 +102,7 @@ static inline void flush_cache_4096(unsigned long start,
 		exec_offset = 0x20000000;

 	local_irq_save(flags);
-	__flush_cache_4096(start | SH_CACHE_ASSOC,
-			   P1SEGADDR(phys), exec_offset);
+	__flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset);
 	local_irq_restore(flags);
 }

@@ -129,8 +127,8 @@ static void sh4_flush_dcache_page(void *arg)

 		/* Loop all the D-cache */
 		n = boot_cpu_data.dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += 4096)
-			flush_cache_4096(addr, phys);
+		for (i = 0; i < n; i++, addr += PAGE_SIZE)
+			flush_cache_one(addr, phys);
 	}

 	wmb();
@@ -318,11 +316,11 @@ static void sh4_flush_cache_page(void *args)
 		/* We only need to flush D-cache when we have alias */
 		if ((address^phys) & alias_mask) {
 			/* Loop 4K of the D-cache */
-			flush_cache_4096(
+			flush_cache_one(
 				CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
 				phys);
 			/* Loop another 4K of the D-cache */
-			flush_cache_4096(
+			flush_cache_one(
 				CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
 				phys);
 		}
@@ -337,7 +335,7 @@ static void sh4_flush_cache_page(void *args)
 		 * kernel has never executed the code through its identity
 		 * translation.
 		 */
-		flush_cache_4096(
+		flush_cache_one(
 			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask), phys);
 	}

@@ -393,7 +391,7 @@ static void sh4_flush_cache_range(void *args)
 }

 /**
- * __flush_cache_4096
+ * __flush_cache_one
  *
  * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
@@ -406,7 +404,7 @@ static void sh4_flush_cache_range(void *args)
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset)
 {
 	int way_count;
--
cgit v1.1
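A quick back-of-the-envelope check of the bug described above, as a
standalone sketch (the 64kB page size is just an assumed configuration):
with no cache aliases the loop body runs exactly once, so the old fixed
4096-byte step writes back only the first 4kB of the page, while stepping
by PAGE_SIZE covers all of it:

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size = 65536; /* e.g. a 64kB page config */
            unsigned long n_aliases = 1;     /* no-alias case from the patch */

            unsigned long old_covered = n_aliases * 4096;      /* fixed step */
            unsigned long new_covered = n_aliases * page_size; /* PAGE_SIZE step */

            printf("old: %lu of %lu bytes flushed\n", old_covered, page_size);
            printf("new: %lu of %lu bytes flushed\n", new_covered, page_size);
            return 0;
    }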
From 5fb80ae8bd7549034845ebfba694d483070b768b Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Fri, 16 Oct 2009 14:38:48 +0900
Subject: sh: disabled cache handling fix.

Add code to handle the cache disabled case. Fixes breakage introduced
by 37443ef3f0406e855e169c87ae3f4ffb4b6ff635 ("sh: Migrate SH-4
cacheflush ops to function pointers."). Without this patch configuring
caches off with CONFIG_CACHE_OFF=y makes kfr2r09 and migo-r lock up in
fbdev deferred io or early user space.

Signed-off-by: Magnus Damm
Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 5e1091b..a2dc7f9 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -265,6 +265,8 @@ static void __init emit_cache_params(void)

 void __init cpu_cache_init(void)
 {
+	unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+
 	compute_alias(&boot_cpu_data.icache);
 	compute_alias(&boot_cpu_data.dcache);
 	compute_alias(&boot_cpu_data.scache);
@@ -273,6 +275,13 @@ void __init cpu_cache_init(void)
 	__flush_purge_region		= noop__flush_region;
 	__flush_invalidate_region	= noop__flush_region;

+	/*
+	 * No flushing is necessary in the disabled cache case so we can
+	 * just keep the noop functions in local_flush_..() and __flush_..()
+	 */
+	if (unlikely(cache_disabled))
+		goto skip;
+
 	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
 		extern void __weak sh2_cache_init(void);

@@ -312,5 +321,6 @@ void __init cpu_cache_init(void)
 		sh5_cache_init();
 	}

+skip:
 	emit_cache_params();
 }
--
cgit v1.1
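The mechanism behind the fix above is plain function-pointer dispatch with
a no-op default: the ops start out pointing at no-ops, and the per-CPU
setup that would overwrite them is simply skipped when the cache is off.
A standalone sketch of that pattern, with invented names (cache_init(),
and flush_wback_region as a simplified stand-in for the real op):

    #include <stdio.h>

    static void noop_flush_region(void *start, int size)
    {
            (void)start;
            (void)size;     /* nothing to do with caches disabled */
    }

    static void sh4_flush_region(void *start, int size)
    {
            printf("flushing %d bytes at %p\n", size, start);
    }

    /* The flush op defaults to the no-op, as in cpu_cache_init(). */
    static void (*flush_wback_region)(void *, int) = noop_flush_region;

    static void cache_init(int cache_enabled)
    {
            if (!cache_enabled)
                    return;         /* the fix: skip straight past CPU setup */
            flush_wback_region = sh4_flush_region;
    }

    int main(void)
    {
            char buf[64];

            cache_init(0);                          /* CONFIG_CACHE_OFF-style boot */
            flush_wback_region(buf, sizeof(buf));   /* safely hits the no-op */
            return 0;
    }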
From ffb4a73d8906f71910e6c83ec2b499e70025ee8e Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 27 Oct 2009 07:22:37 +0900
Subject: sh: Fix hugetlbfs dependencies for SH-3 && MMU configurations.

The hugetlb dependencies presently depend on SUPERH && MMU while the
hugetlb page size definitions depend on CPU_SH4 or CPU_SH5. This
unfortunately allows SH-3 + MMU configurations to enable hugetlbfs
without a corresponding HPAGE_SHIFT definition, resulting in the build
blowing up. As SH-3 doesn't support variable page sizes, we tighten up
the dependencies a bit to prevent hugetlbfs from being enabled. These
days we also have a shiny new SYS_SUPPORTS_HUGETLBFS, so switch to
using that rather than adding to the list of corner cases in fs/Kconfig.

Reported-by: Kristoffer Ericson
Signed-off-by: Paul Mundt
---
 arch/sh/mm/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 64dc1ad..7f7b52f 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -227,7 +227,7 @@ endchoice

 choice
 	prompt "HugeTLB page size"
-	depends on HUGETLB_PAGE && (CPU_SH4 || CPU_SH5) && MMU
+	depends on HUGETLB_PAGE
 	default HUGETLB_PAGE_SIZE_1MB if PAGE_SIZE_64KB
 	default HUGETLB_PAGE_SIZE_64K
--
cgit v1.1

From a9d244a2ff163247b607c4bb64803230ca8f8acb Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Thu, 5 Nov 2009 23:14:39 +0000
Subject: sh: Account for cache aliases in flush_icache_range()

The icache may also contain aliases so we must account for them just
like we do when manipulating the dcache. We usually get away with
aliases in the icache because the instructions that are read from
memory are read-only, i.e. they never change. However, the place where
this bites us is when the code has been modified.

Signed-off-by: Matt Fleming
Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh4.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 519e2d1..b7f235c 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -72,6 +72,7 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)

 	for (v = start; v < end; v += L1_CACHE_BYTES) {
 		unsigned long icacheaddr;
+		int j, n;

 		__ocbwb(v);

 		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
 				cpu_data->icache.entry_mask);

 		/* Clear i-cache line valid-bit */
+		n = boot_cpu_data.icache.n_aliases;
 		for (i = 0; i < cpu_data->icache.ways; i++) {
-			__raw_writel(0, icacheaddr);
+			for (j = 0; j < n; j++)
+				__raw_writel(0, icacheaddr + (j * PAGE_SIZE));
 			icacheaddr += cpu_data->icache.way_incr;
 		}
 	}
--
cgit v1.1
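For illustration, a standalone sketch of the address-array writes the new
inner loop performs. Every constant below is invented for the example --
only the two-level loop structure (ways outside, aliases inside) matches
the patch:

    #include <stdio.h>

    int main(void)
    {
            /* All values are made up for illustration. */
            unsigned long ic_address_array = 0xf0000000UL; /* array base */
            unsigned long entry = 0x120UL;      /* v & icache.entry_mask */
            unsigned long way_incr = 0x2000UL;  /* icache.way_incr */
            unsigned long page_size = 4096UL;
            int ways = 2, n_aliases = 2;

            unsigned long icacheaddr = ic_address_array | entry;
            int i, j;

            for (i = 0; i < ways; i++) {
                    /* the fix: clear the valid bit at every alias offset,
                     * not just the first one */
                    for (j = 0; j < n_aliases; j++)
                            printf("write 0 -> %#lx\n",
                                   icacheaddr + j * page_size);
                    icacheaddr += way_incr;
            }
            return 0;
    }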