Diffstat (limited to 'arch/sh/mm')
-rw-r--r--   arch/sh/mm/Kconfig          |   5
-rw-r--r--   arch/sh/mm/cache-debugfs.c  |   6
-rw-r--r--   arch/sh/mm/cache-sh3.c      |   8
-rw-r--r--   arch/sh/mm/cache-sh4.c      |  77
-rw-r--r--   arch/sh/mm/cache-sh7705.c   |  29
-rw-r--r--   arch/sh/mm/fault.c          |  87
-rw-r--r--   arch/sh/mm/init.c           |  24
-rw-r--r--   arch/sh/mm/ioremap.c        |   6
-rw-r--r--   arch/sh/mm/pg-sh4.c         |  28
-rw-r--r--   arch/sh/mm/pg-sh7705.c      |  37
-rw-r--r--   arch/sh/mm/pmb.c            |   2
-rw-r--r--   arch/sh/mm/tlb-flush.c      | 101
-rw-r--r--   arch/sh/mm/tlb-nommu.c      |  19
-rw-r--r--   arch/sh/mm/tlb-sh3.c        |  67
-rw-r--r--   arch/sh/mm/tlb-sh4.c        |  70
15 files changed, 260 insertions(+), 306 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 29f4ee3..6b0d28a 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -20,7 +20,7 @@ config CPU_SH4
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
-	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
+	select CPU_HAS_PTEA if (!CPU_SUBTYPE_ST40 && !CPU_SH4A) || CPU_SHX2
 
 config CPU_SH4A
 	bool
@@ -72,6 +72,7 @@ config CPU_SUBTYPE_SH7705
 config CPU_SUBTYPE_SH7706
 	bool "Support SH7706 processor"
 	select CPU_SH3
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7706 if you have a 133 Mhz SH-3 HD6417706 CPU.
 
@@ -92,6 +93,7 @@ config CPU_SUBTYPE_SH7708
 config CPU_SUBTYPE_SH7709
 	bool "Support SH7709 processor"
 	select CPU_SH3
+	select CPU_HAS_IPR_IRQ
 	select CPU_HAS_PINT_IRQ
 	help
 	  Select SH7709 if you have a 80 Mhz SH-3 HD6417709 CPU.
@@ -149,6 +151,7 @@ config CPU_SUBTYPE_SH7760
 	bool "Support SH7760 processor"
 	select CPU_SH4
 	select CPU_HAS_INTC2_IRQ
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH4_202
 	bool "Support SH4-202 processor"
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index e0122bd..de6d2c9 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -46,10 +46,10 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 
 	if (cache_type == CACHE_TYPE_DCACHE) {
 		base = CACHE_OC_ADDRESS_ARRAY;
-		cache = &cpu_data->dcache;
+		cache = &current_cpu_data.dcache;
 	} else {
 		base = CACHE_IC_ADDRESS_ARRAY;
-		cache = &cpu_data->icache;
+		cache = &current_cpu_data.icache;
 	}
 
 	/*
@@ -114,7 +114,7 @@ static int cache_debugfs_open(struct inode *inode, struct file *file)
 	return single_open(file, cache_seq_show, inode->i_private);
 }
 
-static struct file_operations cache_debugfs_fops = {
+static const struct file_operations cache_debugfs_fops = {
 	.owner		= THIS_MODULE,
 	.open		= cache_debugfs_open,
 	.read		= seq_read,
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index 838731f..6d1dbec 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -44,11 +44,11 @@ void __flush_wback_region(void *start, int size)
 
 	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
 		unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY;
-		for (j = 0; j < cpu_data->dcache.ways; j++) {
+		for (j = 0; j < current_cpu_data.dcache.ways; j++) {
 			unsigned long data, addr, p;
 
 			p = __pa(v);
-			addr = addrstart | (v & cpu_data->dcache.entry_mask);
+			addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
 			local_irq_save(flags);
 			data = ctrl_inl(addr);
 
@@ -60,7 +60,7 @@ void __flush_wback_region(void *start, int size)
 				break;
 			}
 			local_irq_restore(flags);
-			addrstart += cpu_data->dcache.way_incr;
+			addrstart += current_cpu_data.dcache.way_incr;
 		}
 	}
 }
@@ -85,7 +85,7 @@ void __flush_purge_region(void *start, int size)
 		data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
 		addr = CACHE_OC_ADDRESS_ARRAY |
-			(v & cpu_data->dcache.entry_mask) | SH_CACHE_ASSOC;
+			(v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC;
 		ctrl_outl(data, addr);
 	}
 }
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index c695515..e0cd4b7 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -54,21 +54,21 @@ static void __init emit_cache_params(void)
 		ctrl_inl(CCN_CVR),
 		ctrl_inl(CCN_PRR));
 	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-		cpu_data->icache.ways,
-		cpu_data->icache.sets,
-		cpu_data->icache.way_incr);
+		current_cpu_data.icache.ways,
+		current_cpu_data.icache.sets,
+		current_cpu_data.icache.way_incr);
 	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-		cpu_data->icache.entry_mask,
-		cpu_data->icache.alias_mask,
-		cpu_data->icache.n_aliases);
+		current_cpu_data.icache.entry_mask,
+		current_cpu_data.icache.alias_mask,
+		current_cpu_data.icache.n_aliases);
 	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-		cpu_data->dcache.ways,
-		cpu_data->dcache.sets,
-		cpu_data->dcache.way_incr);
+		current_cpu_data.dcache.ways,
+		current_cpu_data.dcache.sets,
+		current_cpu_data.dcache.way_incr);
 	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-		cpu_data->dcache.entry_mask,
-		cpu_data->dcache.alias_mask,
-		cpu_data->dcache.n_aliases);
+		current_cpu_data.dcache.entry_mask,
+		current_cpu_data.dcache.alias_mask,
+		current_cpu_data.dcache.n_aliases);
 
 	if (!__flush_dcache_segment_fn)
 		panic("unknown number of cache ways\n");
@@ -87,10 +87,10 @@ void __init p3_cache_init(void)
 {
 	int i;
 
-	compute_alias(&cpu_data->icache);
-	compute_alias(&cpu_data->dcache);
+	compute_alias(&current_cpu_data.icache);
+	compute_alias(&current_cpu_data.dcache);
 
-	switch (cpu_data->dcache.ways) {
+	switch (current_cpu_data.dcache.ways) {
 	case 1:
 		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
 		break;
@@ -110,7 +110,7 @@ void __init p3_cache_init(void)
 	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
 		panic("%s failed.", __FUNCTION__);
 
-	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
+	for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
 		mutex_init(&p3map_mutex[i]);
 }
 
@@ -200,13 +200,14 @@ void flush_cache_sigtramp(unsigned long addr)
 		      : /* no output */
 		      : "m" (__m(v)));
 
-	index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);
+	index = CACHE_IC_ADDRESS_ARRAY |
+			(v & current_cpu_data.icache.entry_mask);
 
 	local_irq_save(flags);
 	jump_to_P2();
 
-	for (i = 0; i < cpu_data->icache.ways;
-	     i++, index += cpu_data->icache.way_incr)
+	for (i = 0; i < current_cpu_data.icache.ways;
+	     i++, index += current_cpu_data.icache.way_incr)
 		ctrl_outl(0, index);	/* Clear out Valid-bit */
 
 	back_to_P1();
@@ -223,7 +224,7 @@ static inline void flush_cache_4096(unsigned long start,
 	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
 	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
 	 */
-	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
+	if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
 	    (start < CACHE_OC_ADDRESS_ARRAY))
 		exec_offset = 0x20000000;
 
@@ -236,16 +237,26 @@ static inline void flush_cache_4096(unsigned long start,
 /*
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
+ *
+ * This uses a lazy write-back on UP, which is explicitly
+ * disabled on SMP.
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags)) {
+#ifndef CONFIG_SMP
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
+#endif
+	{
 		unsigned long phys = PHYSADDR(page_address(page));
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
 		int i, n;
 
 		/* Loop all the D-cache */
-		n = cpu_data->dcache.n_aliases;
+		n = current_cpu_data.dcache.n_aliases;
 		for (i = 0; i < n; i++, addr += 4096)
 			flush_cache_4096(addr, phys);
 	}
@@ -277,7 +288,7 @@ static inline void flush_icache_all(void)
 
 void flush_dcache_all(void)
 {
-	(*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size);
+	(*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
 	wmb();
 }
 
@@ -291,8 +302,8 @@ static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
 			     unsigned long end)
 {
 	unsigned long d = 0, p = start & PAGE_MASK;
-	unsigned long alias_mask = cpu_data->dcache.alias_mask;
-	unsigned long n_aliases = cpu_data->dcache.n_aliases;
+	unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
+	unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
 	unsigned long select_bit;
 	unsigned long all_aliases_mask;
 	unsigned long addr_offset;
@@ -379,7 +390,7 @@ void flush_cache_mm(struct mm_struct *mm)
 	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
 	 * the cache is physically tagged, the data can just be left in there.
 	 */
-	if (cpu_data->dcache.n_aliases == 0)
+	if (current_cpu_data.dcache.n_aliases == 0)
 		return;
 
 	/*
@@ -416,7 +427,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 	unsigned long phys = pfn << PAGE_SHIFT;
 	unsigned int alias_mask;
 
-	alias_mask = cpu_data->dcache.alias_mask;
+	alias_mask = current_cpu_data.dcache.alias_mask;
 
 	/* We only need to flush D-cache when we have alias */
 	if ((address^phys) & alias_mask) {
@@ -430,7 +441,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 			phys);
 	}
 
-	alias_mask = cpu_data->icache.alias_mask;
+	alias_mask = current_cpu_data.icache.alias_mask;
 	if (vma->vm_flags & VM_EXEC) {
 		/*
 		 * Evict entries from the portion of the cache from which code
@@ -462,7 +473,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
 	 * the cache is physically tagged, the data can just be left in there.
 	 */
-	if (cpu_data->dcache.n_aliases == 0)
+	if (current_cpu_data.dcache.n_aliases == 0)
 		return;
 
 	/*
@@ -523,7 +534,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 	unsigned long a, ea, p;
 	unsigned long temp_pc;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	/* Write this way for better assembly.
	 */
 	way_count = dcache->ways;
 	way_incr = dcache->way_incr;
@@ -598,7 +609,7 @@ static void __flush_dcache_segment_1way(unsigned long start,
 	base_addr = ((base_addr >> 16) << 16);
 	base_addr |= start;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	linesz = dcache->linesz;
 	way_incr = dcache->way_incr;
 	way_size = dcache->way_size;
@@ -640,7 +651,7 @@ static void __flush_dcache_segment_2way(unsigned long start,
 	base_addr = ((base_addr >> 16) << 16);
 	base_addr |= start;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	linesz = dcache->linesz;
 	way_incr = dcache->way_incr;
 	way_size = dcache->way_size;
@@ -699,7 +710,7 @@ static void __flush_dcache_segment_4way(unsigned long start,
 	base_addr = ((base_addr >> 16) << 16);
 	base_addr |= start;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	linesz = dcache->linesz;
 	way_incr = dcache->way_incr;
 	way_size = dcache->way_size;
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 045abdf..31f8deb 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -3,11 +3,11 @@
  *
  * Copyright (C) 1999, 2000  Niibe Yutaka
  * Copyright (C) 2004  Alex Song
+ * Copyright (C) 2006  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
 #include <linux/init.h>
 #include <linux/mman.h>
@@ -32,9 +32,9 @@ static inline void cache_wback_all(void)
 {
 	unsigned long ways, waysize, addrstart;
 
-	ways = cpu_data->dcache.ways;
-	waysize = cpu_data->dcache.sets;
-	waysize <<= cpu_data->dcache.entry_shift;
+	ways = current_cpu_data.dcache.ways;
+	waysize = current_cpu_data.dcache.sets;
+	waysize <<= current_cpu_data.dcache.entry_shift;
 
 	addrstart = CACHE_OC_ADDRESS_ARRAY;
 
@@ -43,7 +43,7 @@ static inline void cache_wback_all(void)
 
 		for (addr = addrstart;
 		     addr < addrstart + waysize;
-		     addr += cpu_data->dcache.linesz) {
+		     addr += current_cpu_data.dcache.linesz) {
 			unsigned long data;
 			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;
 
@@ -51,10 +51,9 @@ static inline void cache_wback_all(void)
 
 			if ((data & v) == v)
 				ctrl_outl(data & ~v, addr);
-
 		}
 
-		addrstart += cpu_data->dcache.way_incr;
+		addrstart += current_cpu_data.dcache.way_incr;
 	} while (--ways);
 }
 
@@ -94,9 +93,9 @@ static void __flush_dcache_page(unsigned long phys)
 	local_irq_save(flags);
 	jump_to_P2();
 
-	ways = cpu_data->dcache.ways;
-	waysize = cpu_data->dcache.sets;
-	waysize <<= cpu_data->dcache.entry_shift;
+	ways = current_cpu_data.dcache.ways;
+	waysize = current_cpu_data.dcache.sets;
+	waysize <<= current_cpu_data.dcache.entry_shift;
 
 	addrstart = CACHE_OC_ADDRESS_ARRAY;
 
@@ -105,7 +104,7 @@ static void __flush_dcache_page(unsigned long phys)
 
 		for (addr = addrstart;
 		     addr < addrstart + waysize;
-		     addr += cpu_data->dcache.linesz) {
+		     addr += current_cpu_data.dcache.linesz) {
 			unsigned long data;
 
 			data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
@@ -115,7 +114,7 @@ static void __flush_dcache_page(unsigned long phys)
 			}
 		}
 
-		addrstart += cpu_data->dcache.way_incr;
+		addrstart += current_cpu_data.dcache.way_incr;
 	} while (--ways);
 
 	back_to_P1();
@@ -128,7 +127,11 @@ static void __flush_dcache_page(unsigned long phys)
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags))
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }
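The two flush_dcache_page() hunks above implement the same deferred-writeback idea: when a pagecache page has no userspace mapping, no aliased view of it can go stale, so instead of flushing immediately the page is marked PG_dcache_dirty and the flush happens later from update_mmu_cache() (see tlb-flush.c further down), when the page is actually faulted into a user address space. A minimal stand-alone C sketch of that state machine; the struct fields and the physical address are made up for illustration and simply model the kernel's page flags:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's page flags and flush primitive. */
struct page {
	bool dcache_dirty;		/* models PG_dcache_dirty */
	bool has_file_mapping;		/* models page_mapping(page) != NULL */
	bool mapped_into_user;		/* models mapping_mapped(mapping) */
	unsigned long phys;
};

static void writeback_dcache(unsigned long phys)
{
	printf("flushing D-cache aliases for phys %#lx\n", phys);
}

/* flush_dcache_page(): defer when no user mapping can see a stale alias. */
static void flush_dcache_page(struct page *p)
{
	if (p->has_file_mapping && !p->mapped_into_user)
		p->dcache_dirty = true;		/* defer: note it, flush later */
	else
		writeback_dcache(p->phys);	/* an alias is live, flush now */
}

/* update_mmu_cache(): the deferred flush happens when the page is finally
 * mapped into a user address space, just before the TLB entry is loaded. */
static void update_mmu_cache(struct page *p)
{
	if (p->dcache_dirty) {
		p->dcache_dirty = false;
		writeback_dcache(p->phys);
	}
	/* ...then load the TLB entry as usual... */
}

int main(void)
{
	struct page p = { .has_file_mapping = true, .phys = 0x0c000000 };

	flush_dcache_page(&p);	/* deferred: only sets the dirty bit */
	update_mmu_cache(&p);	/* the flush actually happens here */
	return 0;
}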
b/arch/sh/mm/fault.c index 716ebf5..fa5d7f0 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -17,6 +17,7 @@ #include <linux/kprobes.h> #include <asm/system.h> #include <asm/mmu_context.h> +#include <asm/tlbflush.h> #include <asm/kgdb.h> extern void die(const char *,struct pt_regs *,long); @@ -224,3 +225,89 @@ do_sigbus: if (!user_mode(regs)) goto no_context; } + +#ifdef CONFIG_SH_STORE_QUEUES +/* + * This is a special case for the SH-4 store queues, as pages for this + * space still need to be faulted in before it's possible to flush the + * store queue cache for writeout to the remapped region. + */ +#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000) +#else +#define P3_ADDR_MAX P4SEG +#endif + +/* + * Called with interrupts disabled. + */ +asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, + unsigned long writeaccess, + unsigned long address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + pte_t entry; + struct mm_struct *mm = current->mm; + spinlock_t *ptl; + int ret = 1; + +#ifdef CONFIG_SH_KGDB + if (kgdb_nofault && kgdb_bus_err_hook) + kgdb_bus_err_hook(); +#endif + + /* + * We don't take page faults for P1, P2, and parts of P4, these + * are always mapped, whether it be due to legacy behaviour in + * 29-bit mode, or due to PMB configuration in 32-bit mode. + */ + if (address >= P3SEG && address < P3_ADDR_MAX) { + pgd = pgd_offset_k(address); + mm = NULL; + } else { + if (unlikely(address >= TASK_SIZE || !mm)) + return 1; + + pgd = pgd_offset(mm, address); + } + + pud = pud_offset(pgd, address); + if (pud_none_or_clear_bad(pud)) + return 1; + pmd = pmd_offset(pud, address); + if (pmd_none_or_clear_bad(pmd)) + return 1; + + if (mm) + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + else + pte = pte_offset_kernel(pmd, address); + + entry = *pte; + if (unlikely(pte_none(entry) || pte_not_present(entry))) + goto unlock; + if (unlikely(writeaccess && !pte_write(entry))) + goto unlock; + + if (writeaccess) + entry = pte_mkdirty(entry); + entry = pte_mkyoung(entry); + +#ifdef CONFIG_CPU_SH4 + /* + * ITLB is not affected by "ldtlb" instruction. + * So, we need to flush the entry by ourselves. + */ + local_flush_tlb_one(get_asid(), address & PAGE_MASK); +#endif + + set_pte(pte, entry); + update_mmu_cache(NULL, address, entry); + ret = 0; +unlock: + if (mm) + pte_unmap_unlock(pte, ptl); + return ret; +} diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 29bd37b..ae957a9 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -39,11 +39,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); pgd_t swapper_pg_dir[PTRS_PER_PGD]; -/* - * Cache of MMU context last used. - */ -unsigned long mmu_context_cache = NO_CONTEXT; - #ifdef CONFIG_MMU /* It'd be good if these lines were in the standard header file. */ #define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT) @@ -111,7 +106,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); - __flush_tlb_page(get_asid(), addr); + flush_tlb_one(get_asid(), addr); } /* @@ -158,7 +153,6 @@ void __init paging_init(void) * Setup some defaults for the zone sizes.. these should be safe * regardless of distcontiguous memory or MMU settings. */ - zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT; zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT; #ifdef CONFIG_HIGHMEM zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT; @@ -170,8 +164,6 @@ void __init paging_init(void) * the zone sizes accordingly, in addition to turning it on. 
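__do_page_fault() above refills the TLB by walking the page tables in software: pgd (or pgd_offset_k() for the kernel's P3/store-queue window), then pud, pmd, and finally pte, returning nonzero at the first absent level so the caller can fall back to the full fault path. A toy stand-alone model of that walk order; the two-level table, index arithmetic, and values here are made up purely to show the shape of the walk, not the kernel's real layout:

#include <stddef.h>
#include <stdio.h>

/* Toy two-level table mirroring the walk order in __do_page_fault():
 * top level (pgd-like) -> leaf (pte-like). The real kernel inserts pud
 * and pmd levels in between, which mostly fold away on sh. */
#define ENTRIES 4

typedef struct { unsigned long pfn; int present; } pte_entry;
typedef struct { pte_entry *table; } pgd_entry;

static pte_entry *walk(pgd_entry *pgd, unsigned long addr)
{
	pgd_entry *top = &pgd[(addr >> 4) % ENTRIES];
	if (!top->table)
		return NULL;	/* like pud/pmd_none_or_clear_bad() */
	return &top->table[addr % ENTRIES];
}

int main(void)
{
	pte_entry leaf[ENTRIES] = { [2] = { .pfn = 0x1000, .present = 1 } };
	pgd_entry pgd[ENTRIES]  = { [1] = { .table = leaf } };

	pte_entry *pte = walk(pgd, 0x12);	/* top index 1, leaf index 2 */
	if (pte && pte->present)
		printf("refill TLB with pfn %#lx\n", pte->pfn);
	else
		printf("no mapping: fall through to the slow path\n");
	return 0;
}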
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 29bd37b..ae957a9 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -39,11 +39,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-/*
- * Cache of MMU context last used.
- */
-unsigned long mmu_context_cache = NO_CONTEXT;
-
 #ifdef CONFIG_MMU
 /* It'd be good if these lines were in the standard header file. */
 #define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
@@ -111,7 +106,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 
-	__flush_tlb_page(get_asid(), addr);
+	flush_tlb_one(get_asid(), addr);
 }
 
 /*
@@ -158,7 +153,6 @@ void __init paging_init(void)
 	 * Setup some defaults for the zone sizes.. these should be safe
 	 * regardless of distcontiguous memory or MMU settings.
 	 */
-	zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
 	zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
 	zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT;
@@ -170,8 +164,6 @@ void __init paging_init(void)
 	 * the zone sizes accordingly, in addition to turning it on.
 	 */
 	{
-		unsigned long max_dma, low, start_pfn;
-
 		/* We don't need to map the kernel through the TLB, as
 		 * it is permanatly mapped using P1. So clear the
 		 * entire pgd. */
@@ -179,19 +171,7 @@ void __init paging_init(void)
 
 		/* Turn on the MMU */
 		enable_mmu();
-
-		/* Fixup the zone sizes */
-		start_pfn = START_PFN;
-		max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-		low = MAX_LOW_PFN;
-
-		if (low < max_dma) {
-			zones_size[ZONE_DMA] = low - start_pfn;
-			zones_size[ZONE_NORMAL] = 0;
-		} else {
-			zones_size[ZONE_DMA] = max_dma - start_pfn;
-			zones_size[ZONE_NORMAL] = low - max_dma;
-		}
+		zones_size[ZONE_NORMAL] = MAX_LOW_PFN - START_PFN;
 	}
 
 	/* Set an initial value for the MMU.TTB so we don't have to
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 90b494a..be03d74 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -45,12 +45,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 
 	/*
-	 * Don't remap the low PCI/ISA area, it's always mapped..
-	 */
-	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
-		return (void __iomem *)phys_to_virt(phys_addr);
-
-	/*
 	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
 	 * mapped at the end of the address space (typically 0xfd000000)
 	 * in a non-translatable area, so mapping through page tables for
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 3f98d2a..969efece 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -13,7 +13,7 @@
 
 extern struct mutex p3map_mutex[];
 
-#define CACHE_ALIAS (cpu_data->dcache.alias_mask)
+#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
 
 /*
  * clear_user_page
@@ -23,7 +23,6 @@ extern struct mutex p3map_mutex[];
 */
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
@@ -40,7 +39,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), p3_addr);
+		flush_tlb_one(get_asid(), p3_addr);
 		local_irq_restore(flags);
 		update_mmu_cache(NULL, p3_addr, entry);
 		__clear_user_page((void *)p3_addr, to);
@@ -59,7 +58,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 void copy_user_page(void *to, void *from, unsigned long address,
 		    struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
@@ -76,7 +74,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), p3_addr);
+		flush_tlb_one(get_asid(), p3_addr);
 		local_irq_restore(flags);
 		update_mmu_cache(NULL, p3_addr, entry);
 		__copy_user_page((void *)p3_addr, from, to);
@@ -84,23 +82,3 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-	return pte;
-}
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index ff9ece9..887ab9d 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -7,9 +7,7 @@
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
-
 #include <linux/init.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
@@ -45,13 +43,13 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
 
 		p = __pa(p1_begin);
 
-		ways = cpu_data->dcache.ways;
+		ways = current_cpu_data.dcache.ways;
 		addr = CACHE_OC_ADDRESS_ARRAY;
 
 		do {
 			unsigned long data;
 
-			addr |= (v & cpu_data->dcache.entry_mask);
+			addr |= (v & current_cpu_data.dcache.entry_mask);
 
 			data = ctrl_inl(addr);
 			if ((data & CACHE_PHYSADDR_MASK) ==
@@ -60,7 +58,7 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
 				ctrl_outl(data, addr);
 			}
 
-			addr += cpu_data->dcache.way_incr;
+			addr += current_cpu_data.dcache.way_incr;
 		} while (--ways);
 
 		p1_begin += L1_CACHE_BYTES;
@@ -76,7 +74,6 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
 {
 	struct page *page = virt_to_page(to);
 
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
 		clear_page(to);
 		__flush_wback_region(to, PAGE_SIZE);
@@ -95,12 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
  * @from: P1 address
  * @address: U0 address to be mapped
  */
-void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
+void copy_user_page(void *to, void *from, unsigned long address,
+		    struct page *pg)
 {
 	struct page *page = virt_to_page(to);
-
-	__set_bit(PG_mapped, &page->flags);
 
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
 		copy_page(to, from);
 		__flush_wback_region(to, PAGE_SIZE);
@@ -112,26 +108,3 @@ void copy_user_page(void *to, void *from, unsigned long address, struct page *pg
 		__flush_wback_region(to, PAGE_SIZE);
 	}
 }
-
-/*
- * For SH7705, we have our own implementation for ptep_get_and_clear
- * Copied from pg-sh4.c
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-
-	return pte;
-}
-
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b60ad83..d0d45e2 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -378,7 +378,7 @@ static int pmb_debugfs_open(struct inode *inode, struct file *file)
 	return single_open(file, pmb_seq_show, NULL);
 }
 
-static struct file_operations pmb_debugfs_fops = {
+static const struct file_operations pmb_debugfs_fops = {
 	.owner		= THIS_MODULE,
 	.open		= pmb_debugfs_open,
 	.read		= seq_read,
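Both pg-sh4.c and pg-sh7705.c key off the same test: ((address ^ to) & CACHE_ALIAS) == 0, meaning the user address and the kernel's P1 address fall in the same cache colour, so a direct clear/copy is safe; otherwise the page has to be touched through an alias-matched P3 mapping. A small sketch of that colour check; the alias mask and addresses below are illustrative only (the real mask comes from current_cpu_data.dcache.alias_mask):

#include <stdio.h>

/* Illustrative value: a 32KiB, 2-way SH-4 D-cache gives 16KiB per way,
 * i.e. two address bits of "colour" above the 4KiB page offset. */
#define CACHE_ALIAS 0x3000UL

static int same_colour(unsigned long user_addr, unsigned long kernel_addr)
{
	/* Same colour: both addresses index the same cache sets, so the
	 * kernel can write through its own mapping with no alias risk. */
	return ((user_addr ^ kernel_addr) & CACHE_ALIAS) == 0;
}

int main(void)
{
	printf("%d\n", same_colour(0x40001000, 0x8c001000)); /* 1: direct copy */
	printf("%d\n", same_colour(0x40002000, 0x8c001000)); /* 0: map via P3 */
	return 0;
}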
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index 73ec7f6..d2f7b4a 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -2,24 +2,28 @@
  * TLB flushing operations for SH with an MMU.
  *
  *  Copyright (C) 1999  Niibe Yutaka
- *  Copyright (C) 2003  Paul Mundt
+ *  Copyright (C) 2003 - 2006  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  */
 #include <linux/mm.h>
+#include <linux/io.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
+	unsigned int cpu = smp_processor_id();
+
+	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
 		unsigned long flags;
 		unsigned long asid;
 		unsigned long saved_asid = MMU_NO_ASID;
 
-		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
+		asid = cpu_asid(cpu, vma->vm_mm);
 		page &= PAGE_MASK;
 
 		local_irq_save(flags);
@@ -27,33 +31,34 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 			saved_asid = get_asid();
 			set_asid(asid);
 		}
-		__flush_tlb_page(asid, page);
+		local_flush_tlb_one(asid, page);
 		if (saved_asid != MMU_NO_ASID)
 			set_asid(saved_asid);
 		local_irq_restore(flags);
 	}
 }
 
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			   unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	unsigned int cpu = smp_processor_id();
 
-	if (mm->context.id != NO_CONTEXT) {
+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
 		unsigned long flags;
 		int size;
 
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context.id = NO_CONTEXT;
+			cpu_context(cpu, mm) = NO_CONTEXT;
 			if (mm == current->mm)
-				activate_context(mm);
+				activate_context(mm, cpu);
 		} else {
 			unsigned long asid;
 			unsigned long saved_asid = MMU_NO_ASID;
 
-			asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
+			asid = cpu_asid(cpu, mm);
 			start &= PAGE_MASK;
 			end += (PAGE_SIZE - 1);
 			end &= PAGE_MASK;
@@ -62,7 +67,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 				set_asid(asid);
 			}
 			while (start < end) {
-				__flush_tlb_page(asid, start);
+				local_flush_tlb_one(asid, start);
 				start += PAGE_SIZE;
 			}
 			if (saved_asid != MMU_NO_ASID)
@@ -72,26 +77,27 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
+	unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 	int size;
 
 	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
+		local_flush_tlb_all();
 	} else {
 		unsigned long asid;
 		unsigned long saved_asid = get_asid();
 
-		asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
+		asid = cpu_asid(cpu, &init_mm);
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
 		set_asid(asid);
 		while (start < end) {
-			__flush_tlb_page(asid, start);
+			local_flush_tlb_one(asid, start);
 			start += PAGE_SIZE;
 		}
 		set_asid(saved_asid);
@@ -99,22 +105,24 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	local_irq_restore(flags);
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/* Invalidate all TLB of this process. */
 	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context.id != NO_CONTEXT) {
+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
 		unsigned long flags;
 
 		local_irq_save(flags);
-		mm->context.id = NO_CONTEXT;
+		cpu_context(cpu, mm) = NO_CONTEXT;
 		if (mm == current->mm)
-			activate_context(mm);
+			activate_context(mm, cpu);
 		local_irq_restore(flags);
 	}
 }
 
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
 	unsigned long flags, status;
 
@@ -132,3 +140,54 @@ void flush_tlb_all(void)
 	ctrl_barrier();
 	local_irq_restore(flags);
 }
+
+void update_mmu_cache(struct vm_area_struct *vma,
+		      unsigned long address, pte_t pte)
+{
+	unsigned long flags;
+	unsigned long pteval;
+	unsigned long vpn;
+	struct page *page;
+	unsigned long pfn = pte_pfn(pte);
+	struct address_space *mapping;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	mapping = page_mapping(page);
+	if (mapping) {
+		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+
+		if (dirty)
+			__flush_wback_region((void *)P1SEGADDR(phys),
+					     PAGE_SIZE);
+	}
+
+	local_irq_save(flags);
+
+	/* Set PTEH register */
+	vpn = (address & MMU_VPN_MASK) | get_asid();
+	ctrl_outl(vpn, MMU_PTEH);
+
+	pteval = pte_val(pte);
+
+#ifdef CONFIG_CPU_HAS_PTEA
+	/* Set PTEA register */
+	/* TODO: make this look less hacky */
+	ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+#endif
+
+	/* Set PTEL register */
+	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#if defined(CONFIG_SH_WRITETHROUGH) && defined(CONFIG_CPU_SH4)
+	pteval |= _PAGE_WT;
+#endif
+	/* conveniently, we want all the software flags to be 0 anyway */
+	ctrl_outl(pteval, MMU_PTEL);
+
+	/* Load the TLB */
+	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+	local_irq_restore(flags);
+}
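The PTEA write in update_mmu_cache() above packs the PTE's extended attribute bits into the 4-bit PTEA register: ((pteval >> 28) & 0xe) | (pteval & 0x1) moves PTE bits 31..29 into PTEA bits 3..1 and PTE bit 0 into PTEA bit 0. A worked example; the pteval below is made up purely to show the arithmetic:

#include <stdio.h>

int main(void)
{
	/* bits 31 and 29 set, plus bit 0 set */
	unsigned long pteval = 0xa000000d;

	/* (0xa000000d >> 28) & 0xe = 0xa; pteval & 0x1 = 0x1 */
	unsigned long ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1);

	printf("PTEA = %#lx\n", ptea);	/* prints 0xb */
	return 0;
}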
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index e55cfea..1ccca7c 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -13,39 +13,33 @@
 /*
  * Nothing too terribly exciting here ..
  */
-
-void flush_tlb(void)
-{
-	BUG();
-}
-
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
 	BUG();
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
 	BUG();
 }
 
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
 	BUG();
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
 	BUG();
 }
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	BUG();
 }
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	BUG();
 }
@@ -55,4 +49,3 @@ void update_mmu_cache(struct vm_area_struct * vma,
 {
 	BUG();
 }
-
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 46b09e2..e5e76eb 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -8,71 +8,11 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
-
-#if defined(CONFIG_SH7705_CACHE_32KB)
-	{
-		struct page *page = pte_page(pte);
-		unsigned long pfn = pte_pfn(pte);
-
-		if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-
-			__flush_wback_region((void *)P1SEGADDR(phys),
-					     PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
-		}
-	}
-#endif
-
-	local_irq_save(flags);
-
-	/* Set PTEH register */
-	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
-
-	pteval = pte_val(pte);
-
-	/* Set PTEL register */
-	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
-
-	/* Load the TLB */
-	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
-	local_irq_restore(flags);
-}
-
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long addr, data;
 	int i, ways = MMU_NTLB_WAYS;
@@ -86,7 +26,7 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
 	data = (page & 0xfffe0000) | asid; /* VALID bit is off */
 
-	if ((cpu_data->flags & CPU_HAS_MMU_PAGE_ASSOC)) {
+	if ((current_cpu_data.flags & CPU_HAS_MMU_PAGE_ASSOC)) {
 		addr |= MMU_PAGE_ASSOC_BIT;
 		ways = 1;	/* we already know the way .. */
 	}
@@ -94,4 +34,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	for (i = 0; i < ways; i++)
 		ctrl_outl(data, addr + (i << 8));
 }
-
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 812b2d5..221e709 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -8,76 +8,11 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	struct page *page;
-	unsigned long pfn;
-
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
-
-	pfn = pte_pfn(pte);
-	if (pfn_valid(pfn)) {
-		page = pfn_to_page(pfn);
-		if (!test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-			__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
-		}
-	}
-
-	local_irq_save(flags);
-
-	/* Set PTEH register */
-	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
-
-	pteval = pte_val(pte);
-
-	/* Set PTEA register */
-	if (cpu_data->flags & CPU_HAS_PTEA)
-		/* TODO: make this look less hacky */
-		ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
-
-	/* Set PTEL register */
-	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#ifdef CONFIG_SH_WRITETHROUGH
-	pteval |= _PAGE_WT;
-#endif
-	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
-
-	/* Load the TLB */
-	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
-	local_irq_restore(flags);
-}
-
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long addr, data;
 
@@ -93,4 +28,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	ctrl_outl(data, addr);
 	back_to_P1();
 }
-