author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/arm/mm/fault-armv.c | |
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/arm/mm/fault-armv.c')
-rw-r--r-- | arch/arm/mm/fault-armv.c | 223 |
1 file changed, 223 insertions, 0 deletions
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
new file mode 100644
index 0000000..01967dd
--- /dev/null
+++ b/arch/arm/mm/fault-armv.c
@@ -0,0 +1,223 @@
+/*
+ * linux/arch/arm/mm/fault-armv.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Modifications for ARM processor (c) 1995-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
+
+/*
+ * We take the easy way out of this problem - we make the
+ * PTE uncacheable. However, we leave the write buffer on.
+ */
+static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
+{
+        pgd_t *pgd;
+        pmd_t *pmd;
+        pte_t *pte, entry;
+        int ret = 0;
+
+        pgd = pgd_offset(vma->vm_mm, address);
+        if (pgd_none(*pgd))
+                goto no_pgd;
+        if (pgd_bad(*pgd))
+                goto bad_pgd;
+
+        pmd = pmd_offset(pgd, address);
+        if (pmd_none(*pmd))
+                goto no_pmd;
+        if (pmd_bad(*pmd))
+                goto bad_pmd;
+
+        pte = pte_offset_map(pmd, address);
+        entry = *pte;
+
+        /*
+         * If this page isn't present, or is already set up to
+         * fault (ie, is old), we can safely ignore any issues.
+         */
+        if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
+                flush_cache_page(vma, address, pte_pfn(entry));
+                pte_val(entry) &= ~shared_pte_mask;
+                set_pte(pte, entry);
+                flush_tlb_page(vma, address);
+                ret = 1;
+        }
+        pte_unmap(pte);
+        return ret;
+
+bad_pgd:
+        pgd_ERROR(*pgd);
+        pgd_clear(pgd);
+no_pgd:
+        return 0;
+
+bad_pmd:
+        pmd_ERROR(*pmd);
+        pmd_clear(pmd);
+no_pmd:
+        return 0;
+}
+
+static void
+make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
+{
+        struct address_space *mapping = page_mapping(page);
+        struct mm_struct *mm = vma->vm_mm;
+        struct vm_area_struct *mpnt;
+        struct prio_tree_iter iter;
+        unsigned long offset;
+        pgoff_t pgoff;
+        int aliases = 0;
+
+        if (!mapping)
+                return;
+
+        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
+
+        /*
+         * If we have any shared mappings that are in the same mm
+         * space, then we need to handle them specially to maintain
+         * cache coherency.
+         */
+        flush_dcache_mmap_lock(mapping);
+        vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+                /*
+                 * If this VMA is not in our MM, we can ignore it.
+                 * Note that we intentionally mask out the VMA
+                 * that we are fixing up.
+                 */
+                if (mpnt->vm_mm != mm || mpnt == vma)
+                        continue;
+                if (!(mpnt->vm_flags & VM_MAYSHARE))
+                        continue;
+                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+                aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
+        }
+        flush_dcache_mmap_unlock(mapping);
+        if (aliases)
+                adjust_pte(vma, addr);
+        else
+                flush_cache_page(vma, addr, page_to_pfn(page));
+}
+
+/*
+ * Take care of architecture specific things when placing a new PTE into
+ * a page table, or changing an existing PTE. Basically, there are two
+ * things that we need to take care of:
+ *
+ *  1. If PG_dcache_dirty is set for the page, we need to ensure
+ *     that any cache entries for the kernel's virtual memory
+ *     range are written back to the page.
+ *  2. If we have multiple shared mappings of the same space in
+ *     an object, we need to deal with the cache aliasing issues.
+ *
+ * Note that the page_table_lock will be held.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+        unsigned long pfn = pte_pfn(pte);
+        struct page *page;
+
+        if (!pfn_valid(pfn))
+                return;
+        page = pfn_to_page(pfn);
+        if (page_mapping(page)) {
+                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+
+                if (dirty) {
+                        /*
+                         * This is our first userspace mapping of this page.
+                         * Ensure that the physical page is coherent with
+                         * the kernel mapping.
+                         *
+                         * FIXME: only need to do this on VIVT and aliasing
+                         * VIPT cache architectures. We can do that
+                         * by choosing whether to set this bit...
+                         */
+                        __cpuc_flush_dcache_page(page_address(page));
+                }
+
+                if (cache_is_vivt())
+                        make_coherent(vma, addr, page, dirty);
+        }
+}
+
+/*
+ * Check whether the write buffer has physical address aliasing
+ * issues. If it has, we need to avoid them for the case where
+ * we have several shared mappings of the same object in user
+ * space.
+ */
+static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
+{
+        register unsigned long zero = 0, one = 1, val;
+
+        local_irq_disable();
+        mb();
+        *p1 = one;
+        mb();
+        *p2 = zero;
+        mb();
+        val = *p1;
+        mb();
+        local_irq_enable();
+        return val != zero;
+}
+
+void __init check_writebuffer_bugs(void)
+{
+        struct page *page;
+        const char *reason;
+        unsigned long v = 1;
+
+        printk(KERN_INFO "CPU: Testing write buffer coherency: ");
+
+        page = alloc_page(GFP_KERNEL);
+        if (page) {
+                unsigned long *p1, *p2;
+                pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
+                                         L_PTE_DIRTY|L_PTE_WRITE|
+                                         L_PTE_BUFFERABLE);
+
+                p1 = vmap(&page, 1, VM_IOREMAP, prot);
+                p2 = vmap(&page, 1, VM_IOREMAP, prot);
+
+                if (p1 && p2) {
+                        v = check_writebuffer(p1, p2);
+                        reason = "enabling work-around";
+                } else {
+                        reason = "unable to map memory";
+                }
+
+                vunmap(p1);
+                vunmap(p2);
+                put_page(page);
+        } else {
+                reason = "unable to grab page";
+        }
+
+        if (v) {
+                printk("failed, %s\n", reason);
+                shared_pte_mask |= L_PTE_BUFFERABLE;
+        } else {
+                printk("ok\n");
+        }
+}
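To see the shape of the test in `check_writebuffer()`: the two `vmap()` calls in `check_writebuffer_bugs()` map the same physical page at two virtual addresses, the test writes `1` through one alias and `0` through the other, then reads the first alias back. A stale `1` means the write buffer does not keep the aliases coherent, so the work-around ORs `L_PTE_BUFFERABLE` into `shared_pte_mask`, and `adjust_pte()` will from then on make shared aliases uncacheable and unbuffered. The sketch below is a hypothetical user-space analogue of that double-mapping pattern, for illustration only: it substitutes POSIX `shm_open()`/`mmap()` for the kernel's `vmap()`, and the object name `/wb_alias_demo` is an arbitrary placeholder.

```c
/*
 * User-space analogue (illustrative sketch, not part of the commit)
 * of the double-mapping test in check_writebuffer(): map one page of
 * a shared object at two virtual addresses and check whether a store
 * through one alias is visible through the other.
 * Build with: cc alias_demo.c -o alias_demo -lrt  (-lrt on older glibc)
 */
#define _POSIX_C_SOURCE 200809L
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        const char *name = "/wb_alias_demo";   /* arbitrary placeholder name */
        int fd = shm_open(name, O_CREAT | O_RDWR, 0600);

        if (fd < 0 || ftruncate(fd, 4096) < 0) {
                perror("shm setup");
                return 1;
        }

        /* Two MAP_SHARED mappings of the same page: two virtual aliases. */
        volatile unsigned long *p1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                          MAP_SHARED, fd, 0);
        volatile unsigned long *p2 = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                          MAP_SHARED, fd, 0);
        if (p1 == MAP_FAILED || p2 == MAP_FAILED) {
                perror("mmap");
                shm_unlink(name);
                return 1;
        }

        /* Same sequence as check_writebuffer(): 1 via p1, 0 via p2, read p1. */
        *p1 = 1;
        *p2 = 0;
        printf("aliases are %scoherent\n", *p1 == 0 ? "" : "NOT ");

        close(fd);
        shm_unlink(name);
        return 0;
}
```

The same double-mapping hazard is what `make_coherent()` handles for user mappings: a page at file offset `pgoff` appears in every other shared VMA `mpnt` at `mpnt->vm_start + ((pgoff - mpnt->vm_pgoff) << PAGE_SHIFT)`, and each such alias found in the faulting mm gets its PTE downgraded by `adjust_pte()`.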