Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/Kconfig         |   6
-rw-r--r--	arch/sh/mm/Makefile        |   5
-rw-r--r--	arch/sh/mm/asids-debugfs.c |   2
-rw-r--r--	arch/sh/mm/cache-debugfs.c |  10
-rw-r--r--	arch/sh/mm/consistent.c    |  18
-rw-r--r--	arch/sh/mm/gup.c           | 273
-rw-r--r--	arch/sh/mm/init.c          |  52
-rw-r--r--	arch/sh/mm/nommu.c         |   4
-rw-r--r--	arch/sh/mm/pmb.c           |  35
-rw-r--r--	arch/sh/mm/sram.c          |  34
-rw-r--r--	arch/sh/mm/tlb-debugfs.c   |  11
-rw-r--r--	arch/sh/mm/tlbflush_32.c   |  16
-rw-r--r--	arch/sh/mm/tlbflush_64.c   |   5
-rw-r--r--	arch/sh/mm/uncached.c      |   2
14 files changed, 409 insertions, 64 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 1445ca6..c3e61b3 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -79,7 +79,7 @@ config 29BIT
 
 config 32BIT
 	bool
-	default y if CPU_SH5
+	default y if CPU_SH5 || !MMU
 
 config PMB
 	bool "Support 32-bit physical addressing through PMB"
@@ -168,6 +168,10 @@ config IOREMAP_FIXED
 config UNCACHED_MAPPING
 	bool
 
+config HAVE_SRAM_POOL
+	bool
+	select GENERIC_ALLOCATOR
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 53f7c68..150aa32 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -15,7 +15,7 @@ cacheops-$(CONFIG_CPU_SHX3)	+= cache-shx3.o
 obj-y			+= $(cacheops-y)
 
 mmu-y			:= nommu.o extable_32.o
-mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault_$(BITS).o \
+mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault_$(BITS).o gup.o \
 			   ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
 
 obj-y			+= $(mmu-y)
@@ -40,6 +40,7 @@ obj-$(CONFIG_PMB)		+= pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
 obj-$(CONFIG_UNCACHED_MAPPING)	+= uncached.o
+obj-$(CONFIG_HAVE_SRAM_POOL)	+= sram.o
 
 # Special flags for fault_64.o. This puts restrictions on the number of
 # caller-save registers that the compiler can target when building this file.
@@ -66,4 +67,4 @@ CFLAGS_fault_64.o	+= -ffixed-r7 \
 			   -ffixed-r60 -ffixed-r61 -ffixed-r62 \
 			   -fomit-frame-pointer
 
-EXTRA_CFLAGS += -Werror
+ccflags-y := -Werror
diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c
index cd8c3bf3..74c03ec 100644
--- a/arch/sh/mm/asids-debugfs.c
+++ b/arch/sh/mm/asids-debugfs.c
@@ -63,7 +63,7 @@ static int __init asids_debugfs_init(void)
 {
 	struct dentry *asids_dentry;
 
-	asids_dentry = debugfs_create_file("asids", S_IRUSR, sh_debugfs_root,
+	asids_dentry = debugfs_create_file("asids", S_IRUSR, arch_debugfs_dir,
 					   NULL, &asids_debugfs_fops);
 	if (!asids_dentry)
 		return -ENOMEM;
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 690ed01..5241146 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -126,25 +126,19 @@ static int __init cache_debugfs_init(void)
 {
 	struct dentry *dcache_dentry, *icache_dentry;
 
-	dcache_dentry = debugfs_create_file("dcache", S_IRUSR, sh_debugfs_root,
+	dcache_dentry = debugfs_create_file("dcache", S_IRUSR, arch_debugfs_dir,
 					    (unsigned int *)CACHE_TYPE_DCACHE,
 					    &cache_debugfs_fops);
 	if (!dcache_dentry)
 		return -ENOMEM;
-	if (IS_ERR(dcache_dentry))
-		return PTR_ERR(dcache_dentry);
 
-	icache_dentry = debugfs_create_file("icache", S_IRUSR, sh_debugfs_root,
+	icache_dentry = debugfs_create_file("icache", S_IRUSR, arch_debugfs_dir,
 					    (unsigned int *)CACHE_TYPE_ICACHE,
 					    &cache_debugfs_fops);
 	if (!icache_dentry) {
 		debugfs_remove(dcache_dentry);
 		return -ENOMEM;
 	}
-	if (IS_ERR(icache_dentry)) {
-		debugfs_remove(dcache_dentry);
-		return PTR_ERR(icache_dentry);
-	}
 
 	return 0;
 }
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index c86a085..40733a9 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -38,11 +38,12 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 	void *ret, *ret_nocache;
 	int order = get_order(size);
 
+	gfp |= __GFP_ZERO;
+
 	ret = (void *)__get_free_pages(gfp, order);
 	if (!ret)
 		return NULL;
 
-	memset(ret, 0, size);
 	/*
 	 * Pages from the page allocator may have data present in
 	 * cache. So flush the cache before using uncached memory.
@@ -78,21 +79,20 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
-#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
-	void *p1addr = vaddr;
-#else
-	void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
-#endif
+	void *addr;
+
+	addr = __in_29bit_mode() ?
+	       (void *)P1SEGADDR((unsigned long)vaddr) : vaddr;
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		__flush_invalidate_region(p1addr, size);
+		__flush_invalidate_region(addr, size);
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		__flush_wback_region(p1addr, size);
+		__flush_wback_region(addr, size);
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		__flush_purge_region(p1addr, size);
+		__flush_purge_region(addr, size);
 		break;
 	default:
 		BUG();
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
new file mode 100644
index 0000000..bf8daf9
--- /dev/null
+++ b/arch/sh/mm/gup.c
@@ -0,0 +1,273 @@
+/*
+ * Lockless get_user_pages_fast for SuperH
+ *
+ * Copyright (C) 2009 - 2010  Paul Mundt
+ *
+ * Cloned from the x86 and PowerPC versions, by:
+ *
+ *	Copyright (C) 2008 Nick Piggin
+ *	Copyright (C) 2008 Novell Inc.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/highmem.h>
+#include <asm/pgtable.h>
+
+static inline pte_t gup_get_pte(pte_t *ptep)
+{
+#ifndef CONFIG_X2TLB
+	return ACCESS_ONCE(*ptep);
+#else
+	/*
+	 * With get_user_pages_fast, we walk down the pagetables without
+	 * taking any locks.  For this we would like to load the pointers
+	 * atomically, but that is not possible with 64-bit PTEs.  What
+	 * we do have is the guarantee that a pte will only either go
+	 * from not present to present, or present to not present or both
+	 * -- it will not switch to a completely different present page
+	 * without a TLB flush in between; something that we are blocking
+	 * by holding interrupts off.
+	 *
+	 * Setting ptes from not present to present goes:
+	 * ptep->pte_high = h;
+	 * smp_wmb();
+	 * ptep->pte_low = l;
+	 *
+	 * And present to not present goes:
+	 * ptep->pte_low = 0;
+	 * smp_wmb();
+	 * ptep->pte_high = 0;
+	 *
+	 * We must ensure here that the load of pte_low sees l iff pte_high
+	 * sees h. We load pte_high *after* loading pte_low, which ensures we
+	 * don't see an older value of pte_high.  *Then* we recheck pte_low,
+	 * which ensures that we haven't picked up a changed pte high. We might
+	 * have got rubbish values from pte_low and pte_high, but we are
+	 * guaranteed that pte_low will not have the present bit set *unless*
+	 * it is 'l'. And get_user_pages_fast only operates on present ptes, so
+	 * we're safe.
+	 *
+	 * gup_get_pte should not be used or copied outside gup.c without being
+	 * very careful -- it does not atomically load the pte or anything that
+	 * is likely to be useful for you.
+	 */
+	pte_t pte;
+
+retry:
+	pte.pte_low = ptep->pte_low;
+	smp_rmb();
+	pte.pte_high = ptep->pte_high;
+	smp_rmb();
+	if (unlikely(pte.pte_low != ptep->pte_low))
+		goto retry;
+
+	return pte;
+#endif
+}
+
+/*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+ * register pressure.
+ */
+static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+		unsigned long end, int write, struct page **pages, int *nr)
+{
+	u64 mask, result;
+	pte_t *ptep;
+
+#ifdef CONFIG_X2TLB
+	result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
+	if (write)
+		result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
+#elif defined(CONFIG_SUPERH64)
+	result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
+	if (write)
+		result |= _PAGE_WRITE;
+#else
+	result = _PAGE_PRESENT | _PAGE_USER;
+	if (write)
+		result |= _PAGE_RW;
+#endif
+
+	mask = result | _PAGE_SPECIAL;
+
+	ptep = pte_offset_map(&pmd, addr);
+	do {
+		pte_t pte = gup_get_pte(ptep);
+		struct page *page;
+
+		if ((pte_val(pte) & mask) != result) {
+			pte_unmap(ptep);
+			return 0;
+		}
+		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+		page = pte_page(pte);
+		get_page(page);
+		pages[*nr] = page;
+		(*nr)++;
+
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
+	pte_unmap(ptep - 1);
+
+	return 1;
+}
+
+static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+		int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pmd_t *pmdp;
+
+	pmdp = pmd_offset(&pud, addr);
+	do {
+		pmd_t pmd = *pmdp;
+
+		next = pmd_addr_end(addr, end);
+		if (pmd_none(pmd))
+			return 0;
+		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+			return 0;
+	} while (pmdp++, addr = next, addr != end);
+
+	return 1;
+}
+
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+			 int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pud_t *pudp;
+
+	pudp = pud_offset(&pgd, addr);
+	do {
+		pud_t pud = *pudp;
+
+		next = pud_addr_end(addr, end);
+		if (pud_none(pud))
+			return 0;
+		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+			return 0;
+	} while (pudp++, addr = next, addr != end);
+
+	return 1;
+}
+
+/*
+ * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
+ * back to the regular GUP.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			  struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr, len, end;
+	unsigned long next;
+	unsigned long flags;
+	pgd_t *pgdp;
+	int nr = 0;
+
+	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+	end = start + len;
+	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+					(void __user *)start, len)))
+		return 0;
+
+	/*
+	 * This doesn't prevent pagetable teardown, but does prevent
+	 * the pagetables and pages from being freed.
+	 */
+	local_irq_save(flags);
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd_t pgd = *pgdp;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			break;
+		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+			break;
+	} while (pgdp++, addr = next, addr != end);
+	local_irq_restore(flags);
+
+	return nr;
+}
+
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start:	starting user address
+ * @nr_pages:	number of pages from start to pin
+ * @write:	whether pages will be written to
+ * @pages:	array that receives pointers to the pages pinned.
+ *		Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr, len, end;
+	unsigned long next;
+	pgd_t *pgdp;
+	int nr = 0;
+
+	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+
+	end = start + len;
+	if (end < start)
+		goto slow_irqon;
+
+	local_irq_disable();
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd_t pgd = *pgdp;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			goto slow;
+		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+			goto slow;
+	} while (pgdp++, addr = next, addr != end);
+	local_irq_enable();
+
+	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+	return nr;
+
+	{
+		int ret;
+
+slow:
+		local_irq_enable();
+slow_irqon:
+		/* Try to get the remaining pages with get_user_pages */
+		start += nr << PAGE_SHIFT;
+		pages += nr;
+
+		down_read(&mm->mmap_sem);
+		ret = get_user_pages(current, mm, start,
+			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+		up_read(&mm->mmap_sem);
+
+		/* Have to be a bit careful with return values */
+		if (nr > 0) {
+			if (ret < 0)
+				ret = nr;
+			else
+				ret += nr;
+		}
+
+		return ret;
+	}
+}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 552bea5..3385b28 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -47,7 +47,6 @@ static pte_t *__get_pte_phys(unsigned long addr)
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
 
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
@@ -67,8 +66,7 @@ static pte_t *__get_pte_phys(unsigned long addr)
 		return NULL;
 	}
 
-	pte = pte_offset_kernel(pmd, addr);
-	return pte;
+	return pte_offset_kernel(pmd, addr);
 }
 
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
@@ -125,13 +123,45 @@ void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
 	clear_pte_phys(address, prot);
 }
 
+static pmd_t * __init one_md_table_init(pud_t *pud)
+{
+	if (pud_none(*pud)) {
+		pmd_t *pmd;
+
+		pmd = alloc_bootmem_pages(PAGE_SIZE);
+		pud_populate(&init_mm, pud, pmd);
+		BUG_ON(pmd != pmd_offset(pud, 0));
+	}
+
+	return pmd_offset(pud, 0);
+}
+
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *pte;
+
+		pte = alloc_bootmem_pages(PAGE_SIZE);
+		pmd_populate_kernel(&init_mm, pmd, pte);
+		BUG_ON(pte != pte_offset_kernel(pmd, 0));
+	}
+
+	return pte_offset_kernel(pmd, 0);
+}
+
+static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					    unsigned long vaddr, pte_t *lastpte)
+{
+	return pte;
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 					 pgd_t *pgd_base)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
 	int i, j, k;
 	unsigned long vaddr;
 
@@ -144,19 +174,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 		pud = (pud_t *)pgd;
 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
-#ifdef __PAGETABLE_PMD_FOLDED
-			pmd = (pmd_t *)pud;
-#else
-			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-			pud_populate(&init_mm, pud, pmd);
+			pmd = one_md_table_init(pud);
+#ifndef __PAGETABLE_PMD_FOLDED
 			pmd += k;
 #endif
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
-				if (pmd_none(*pmd)) {
-					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-					pmd_populate_kernel(&init_mm, pmd, pte);
-					BUG_ON(pte != pte_offset_kernel(pmd, 0));
-				}
+				pte = page_table_kmap_check(one_page_table_init(pmd),
+							    pmd, vaddr, pte);
 				vaddr += PMD_SIZE;
 			}
 			k = 0;
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index 7694f50..36312d2 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -67,6 +67,10 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	BUG();
 }
 
+void __flush_tlb_global(void)
+{
+}
+
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 6379091..b20b1b3 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -40,7 +40,7 @@ struct pmb_entry {
 	unsigned long flags;
 	unsigned long size;
 
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -265,7 +265,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	memset(pmbe, 0, sizeof(struct pmb_entry));
 
-	spin_lock_init(&pmbe->lock);
+	raw_spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn	= vpn;
 	pmbe->ppn	= ppn;
@@ -327,9 +327,9 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmbe->lock, flags);
+	raw_spin_lock_irqsave(&pmbe->lock, flags);
 	__set_pmb_entry(pmbe);
-	spin_unlock_irqrestore(&pmbe->lock, flags);
+	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 #endif /* CONFIG_PM */
 
@@ -368,7 +368,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			return PTR_ERR(pmbe);
 		}
 
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 		pmbe->size = pmb_sizes[i].size;
 
@@ -383,9 +383,10 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 		 * entries for easier tear-down.
 		 */
 		if (likely(pmbp)) {
-			spin_lock(&pmbp->lock);
+			raw_spin_lock_nested(&pmbp->lock,
+					     SINGLE_DEPTH_NESTING);
 			pmbp->link = pmbe;
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}
 
 		pmbp = pmbe;
@@ -398,7 +399,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 		i--;
 		mapped++;
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 	} while (size >= SZ_16M);
 
@@ -627,15 +628,14 @@ static void __init pmb_synchronize(void)
 			continue;
 		}
 
-		spin_lock_irqsave(&pmbe->lock, irqflags);
+		raw_spin_lock_irqsave(&pmbe->lock, irqflags);
 
 		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
 			if (pmb_sizes[j].flag == size)
 				pmbe->size = pmb_sizes[j].size;
 
 		if (pmbp) {
-			spin_lock(&pmbp->lock);
-
+			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
 			/*
 			 * Compare the previous entry against the current one to
 			 * see if the entries span a contiguous mapping. If so,
@@ -644,13 +644,12 @@ static void __init pmb_synchronize(void)
 			 */
 			if (pmb_can_merge(pmbp, pmbe))
 				pmbp->link = pmbe;
-
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}
 
 		pmbp = pmbe;
 
-		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
 	}
 }
 
@@ -757,7 +756,7 @@ static void __init pmb_resize(void)
 		/*
 		 * Found it, now resize it.
 		 */
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 		pmbe->size = SZ_16M;
 		pmbe->flags &= ~PMB_SZ_MASK;
@@ -767,7 +766,7 @@ static void __init pmb_resize(void)
 
 		__set_pmb_entry(pmbe);
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
 	read_unlock(&pmb_rwlock);
@@ -866,11 +865,9 @@ static int __init pmb_debugfs_init(void)
 	struct dentry *dentry;
 
 	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
-				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
+				     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
 	if (!dentry)
 		return -ENOMEM;
-	if (IS_ERR(dentry))
-		return PTR_ERR(dentry);
 
 	return 0;
 }
diff --git a/arch/sh/mm/sram.c b/arch/sh/mm/sram.c
new file mode 100644
index 0000000..bc156ec
--- /dev/null
+++ b/arch/sh/mm/sram.c
@@ -0,0 +1,34 @@
+/*
+ * SRAM pool for tiny memories not otherwise managed.
+ *
+ * Copyright (C) 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/sram.h>
+
+/*
+ * This provides a standard SRAM pool for tiny memories that can be
+ * added either by the CPU or the platform code. Typical SRAM sizes
+ * to be inserted in to the pool will generally be less than the page
+ * size, with anything more reasonably sized handled as a NUMA memory
+ * node.
+ */
+struct gen_pool *sram_pool;
+
+static int __init sram_pool_init(void)
+{
+	/*
+	 * This is a global pool, we don't care about node locality.
+	 */
+	sram_pool = gen_pool_create(1, -1);
+	if (unlikely(!sram_pool))
+		return -ENOMEM;
+
+	return 0;
+}
+core_initcall(sram_pool_init);
diff --git a/arch/sh/mm/tlb-debugfs.c b/arch/sh/mm/tlb-debugfs.c
index 229bf75..dea637a 100644
--- a/arch/sh/mm/tlb-debugfs.c
+++ b/arch/sh/mm/tlb-debugfs.c
@@ -151,15 +151,13 @@ static int __init tlb_debugfs_init(void)
 {
 	struct dentry *itlb, *utlb;
 
-	itlb = debugfs_create_file("itlb", S_IRUSR, sh_debugfs_root,
+	itlb = debugfs_create_file("itlb", S_IRUSR, arch_debugfs_dir,
 				   (unsigned int *)TLB_TYPE_ITLB,
 				   &tlb_debugfs_fops);
 	if (unlikely(!itlb))
 		return -ENOMEM;
-	if (IS_ERR(itlb))
-		return PTR_ERR(itlb);
 
-	utlb = debugfs_create_file("utlb", S_IRUSR, sh_debugfs_root,
+	utlb = debugfs_create_file("utlb", S_IRUSR, arch_debugfs_dir,
 				   (unsigned int *)TLB_TYPE_UTLB,
 				   &tlb_debugfs_fops);
 	if (unlikely(!utlb)) {
@@ -167,11 +165,6 @@ static int __init tlb_debugfs_init(void)
 		return -ENOMEM;
 	}
 
-	if (IS_ERR(utlb)) {
-		debugfs_remove(itlb);
-		return PTR_ERR(utlb);
-	}
-
 	return 0;
 }
 module_init(tlb_debugfs_init);
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
index 3fbe03c..a6a20d6 100644
--- a/arch/sh/mm/tlbflush_32.c
+++ b/arch/sh/mm/tlbflush_32.c
@@ -119,3 +119,19 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 		local_irq_restore(flags);
 	}
 }
+
+void __flush_tlb_global(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	/*
+	 * This is the most destructive of the TLB flushing options,
+	 * and will tear down all of the UTLB/ITLB mappings, including
+	 * wired entries.
+	 */
+	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 03db41c..7f5810f 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -455,6 +455,11 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	flush_tlb_all();
 }
 
+void __flush_tlb_global(void)
+{
+	flush_tlb_all();
+}
+
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index 8a4eca5..a7767da 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(virt_addr_uncached);
 
 void __init uncached_init(void)
 {
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || !defined(CONFIG_MMU)
 	uncached_start = P2SEG;
 #else
 	uncached_start = memory_end;
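
Note on the new SRAM pool: sram_pool is an ordinary genalloc pool created with byte granularity, so platform or CPU code can feed an on-chip memory range into it and carve allocations back out with the standard gen_pool_*() calls. The sketch below only illustrates that flow; the base address, size, and the example_* helper names are invented for the example, while gen_pool_add()/gen_pool_alloc()/gen_pool_free() are the real genalloc API.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
#include <asm/sram.h>

/* Hypothetical on-chip SRAM window -- a real platform supplies its own range. */
#define EXAMPLE_SRAM_BASE	0xe5200000UL
#define EXAMPLE_SRAM_SIZE	0x1000

static int __init example_sram_register(void)
{
	/* Hand the region to the global pool created by sram_pool_init(). */
	return gen_pool_add(sram_pool, EXAMPLE_SRAM_BASE,
			    EXAMPLE_SRAM_SIZE, -1);
}

static unsigned long example_buf;

static int example_buf_alloc(void)
{
	/* Carve 256 bytes out of the pool; 0 means the pool is exhausted. */
	example_buf = gen_pool_alloc(sram_pool, 256);
	return example_buf ? 0 : -ENOMEM;
}

static void example_buf_free(void)
{
	gen_pool_free(sram_pool, example_buf, 256);
}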
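
Note on gup.c: get_user_pages_fast() keeps the calling convention documented in the kerneldoc above (start, nr_pages, write, pages) and falls back to get_user_pages() on its own, so SH drivers can pin user buffers the same way they would on x86 or PowerPC. Below is a minimal sketch under the 2010-era API; the pin/unpin helpers are hypothetical and a caller must still cope with fewer pages being pinned than requested.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical helpers: pin and release the pages backing a user buffer. */
static int example_pin_user_buffer(unsigned long uaddr, size_t len,
				   struct page ***pagesp, int *nr_pinnedp)
{
	/* Account for the offset of uaddr within its first page. */
	int nr_pages = ((uaddr & ~PAGE_MASK) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct page **pages;
	int pinned;

	pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Lockless fast path first; falls back to get_user_pages() internally. */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}

	*pagesp = pages;
	*nr_pinnedp = pinned;
	return 0;
}

static void example_unpin_user_buffer(struct page **pages, int nr_pinned)
{
	int i;

	/* Drop the references taken by get_user_pages_fast(). */
	for (i = 0; i < nr_pinned; i++)
		put_page(pages[i]);
	kfree(pages);
}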